From 35c66e0ae91428b5799785d887d74a67e04cbf7f Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 23 Dec 2025 11:16:39 -0500 Subject: [PATCH 01/47] Updated git ignore and triggers TF deploy. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 714e1af..0e0857b 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ crash.log # version control. # # example.tfvars +*.auto.tfvars # Ignore override files as they are usually used to override resources locally and so # are not checked in From 7b4418b2e0d5df966d3c41f3cbf2c20473931914 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 23 Dec 2025 14:45:47 -0500 Subject: [PATCH 02/47] Update policy to Cluster Admin. --- terraform/modules/eks/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/modules/eks/main.tf b/terraform/modules/eks/main.tf index 606e3be..35f6935 100644 --- a/terraform/modules/eks/main.tf +++ b/terraform/modules/eks/main.tf @@ -269,7 +269,7 @@ resource "aws_eks_access_policy_association" "admin_policy" { for_each = var.admin_role_arns cluster_name = aws_eks_cluster.eks_cluster.name - policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy" + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" principal_arn = each.value access_scope { From f7aa48cf82163d9d36b3724471eedb72ed1ba81b Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Wed, 7 Jan 2026 14:55:27 -0300 Subject: [PATCH 03/47] upgrade kubernetes version to 1.30 --- terraform/modules/node_group/main.tf | 2 +- terraform/vars/terraform-dev.tfvars | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/terraform/modules/node_group/main.tf b/terraform/modules/node_group/main.tf index 427c345..a04636f 100644 --- a/terraform/modules/node_group/main.tf +++ b/terraform/modules/node_group/main.tf @@ -15,7 +15,7 @@ resource "aws_eks_node_group" "eks-node-group" { node_group_name = 
"${var.node_group_name}-${random_id.eks-node-group.hex}" node_role_arn = var.node_role_arn subnet_ids = var.subnet_ids - release_version = var.eks_node_release_version +# release_version = var.eks_node_release_version capacity_type = var.capacity_type scaling_config { diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index 38ff88e..78020c2 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -10,8 +10,8 @@ rds_instance_count = 1 db_instance_class = "db.t3.medium" db_instance_count = 1 db_logs_exports = ["audit", "profiler"] -eks_version = "1.29" -eks_node_release_version = "1.29.0-20240202" +eks_version = "1.30" +eks_node_release_version = "1.29.0-20240202" #release_version commented in main.tf ebs_csi_addon_version = "v1.27.0-eksbuild.1" # apps_node_group_min_size = 1 # apps_node_group_max_size = 16 From 6df16b643eb7fcd1f4b4d07a74e115205d3e4ab4 Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Thu, 8 Jan 2026 16:10:59 -0300 Subject: [PATCH 04/47] set hibernate attribute as false --- terraform/vars/terraform-dev.tfvars | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index 78020c2..8dcccdc 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -22,6 +22,6 @@ ebs_csi_addon_version = "v1.27.0-eksbuild.1" # gfw_node_group_desired_size = 4 # gfw_node_group_min_size_upscaled = 2 gateway_node_group_desired_size = 0 -hibernate = true +hibernate = false aq_bucket_cors_allowed_origin = "*" deploy_sparkpost_templates = false From 85c1397d437353162289433bbf616dce65e02869 Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Thu, 8 Jan 2026 16:20:52 -0300 Subject: [PATCH 05/47] comment eks_node_release_version --- terraform/main.tf | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 1fc288e..77d7aec 100644 --- 
a/terraform/main.tf +++ b/terraform/main.tf @@ -75,7 +75,7 @@ module "mongodb-apps-node-group" { max_size = var.mongodb_apps_node_group_max_size desired_size = var.mongodb_apps_node_group_desired_size node_role_arn = module.eks.node_role_arn - eks_node_release_version = var.eks_node_release_version +# eks_node_release_version = var.eks_node_release_version capacity_type = var.mongodb_apps_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -98,7 +98,7 @@ module "apps-node-group" { max_size = var.apps_node_group_max_size desired_size = var.apps_node_group_desired_size node_role_arn = module.eks.node_role_arn - eks_node_release_version = var.eks_node_release_version +# eks_node_release_version = var.eks_node_release_version capacity_type = var.apps_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -123,7 +123,7 @@ module "webapps-node-group" { max_size = var.webapps_node_group_max_size desired_size = var.webapps_node_group_desired_size node_role_arn = module.eks.node_role_arn - eks_node_release_version = var.eks_node_release_version +# eks_node_release_version = var.eks_node_release_version capacity_type = var.webapps_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -148,7 +148,7 @@ module "core-node-group" { max_size = var.core_node_group_max_size desired_size = var.core_node_group_desired_size node_role_arn = module.eks.node_role_arn - eks_node_release_version = var.eks_node_release_version +# eks_node_release_version = var.eks_node_release_version capacity_type = var.core_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[5].id @@ -169,7 +169,7 @@ module "gfw-node-group" { max_size = var.gfw_node_group_max_size desired_size = var.gfw_node_group_desired_size node_role_arn = module.eks.node_role_arn - eks_node_release_version = var.eks_node_release_version +# eks_node_release_version = var.eks_node_release_version capacity_type = var.gfw_node_group_capacity_type 
subnet_ids = [ module.vpc.private_subnets[0].id, @@ -194,7 +194,7 @@ module "gateway-node-group" { max_size = var.gateway_node_group_max_size desired_size = var.gateway_node_group_desired_size node_role_arn = module.eks.node_role_arn - eks_node_release_version = var.eks_node_release_version +# eks_node_release_version = var.eks_node_release_version capacity_type = "ON_DEMAND" subnet_ids = [ module.vpc.private_subnets[0].id, From 2410e587be139e7fbca2b8dcf21fa50613cd879a Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Thu, 8 Jan 2026 16:26:11 -0300 Subject: [PATCH 06/47] set default value to eks_node_release_version variable --- terraform/variables.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/terraform/variables.tf b/terraform/variables.tf index 658de71..f86ccdc 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -232,6 +232,7 @@ variable "eks_version" { variable "eks_node_release_version" { type = string + default = "" description = "Version of EKS (kubernetes) node AMI to deploy" } From 22b12389b68ad4184704a3ec66bddf6a663f874e Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Thu, 8 Jan 2026 16:31:48 -0300 Subject: [PATCH 07/47] set undo comments --- terraform/main.tf | 12 ++++++------ terraform/modules/node_group/main.tf | 2 +- terraform/vars/terraform-dev.tfvars | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 77d7aec..1fc288e 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -75,7 +75,7 @@ module "mongodb-apps-node-group" { max_size = var.mongodb_apps_node_group_max_size desired_size = var.mongodb_apps_node_group_desired_size node_role_arn = module.eks.node_role_arn -# eks_node_release_version = var.eks_node_release_version + eks_node_release_version = var.eks_node_release_version capacity_type = var.mongodb_apps_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -98,7 +98,7 @@ module "apps-node-group" { max_size = var.apps_node_group_max_size 
desired_size = var.apps_node_group_desired_size node_role_arn = module.eks.node_role_arn -# eks_node_release_version = var.eks_node_release_version + eks_node_release_version = var.eks_node_release_version capacity_type = var.apps_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -123,7 +123,7 @@ module "webapps-node-group" { max_size = var.webapps_node_group_max_size desired_size = var.webapps_node_group_desired_size node_role_arn = module.eks.node_role_arn -# eks_node_release_version = var.eks_node_release_version + eks_node_release_version = var.eks_node_release_version capacity_type = var.webapps_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -148,7 +148,7 @@ module "core-node-group" { max_size = var.core_node_group_max_size desired_size = var.core_node_group_desired_size node_role_arn = module.eks.node_role_arn -# eks_node_release_version = var.eks_node_release_version + eks_node_release_version = var.eks_node_release_version capacity_type = var.core_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[5].id @@ -169,7 +169,7 @@ module "gfw-node-group" { max_size = var.gfw_node_group_max_size desired_size = var.gfw_node_group_desired_size node_role_arn = module.eks.node_role_arn -# eks_node_release_version = var.eks_node_release_version + eks_node_release_version = var.eks_node_release_version capacity_type = var.gfw_node_group_capacity_type subnet_ids = [ module.vpc.private_subnets[0].id, @@ -194,7 +194,7 @@ module "gateway-node-group" { max_size = var.gateway_node_group_max_size desired_size = var.gateway_node_group_desired_size node_role_arn = module.eks.node_role_arn -# eks_node_release_version = var.eks_node_release_version + eks_node_release_version = var.eks_node_release_version capacity_type = "ON_DEMAND" subnet_ids = [ module.vpc.private_subnets[0].id, diff --git a/terraform/modules/node_group/main.tf b/terraform/modules/node_group/main.tf index a04636f..427c345 100644 --- 
a/terraform/modules/node_group/main.tf +++ b/terraform/modules/node_group/main.tf @@ -15,7 +15,7 @@ resource "aws_eks_node_group" "eks-node-group" { node_group_name = "${var.node_group_name}-${random_id.eks-node-group.hex}" node_role_arn = var.node_role_arn subnet_ids = var.subnet_ids -# release_version = var.eks_node_release_version + release_version = var.eks_node_release_version capacity_type = var.capacity_type scaling_config { diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index 8dcccdc..4d68129 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -11,7 +11,7 @@ db_instance_class = "db.t3.medium" db_instance_count = 1 db_logs_exports = ["audit", "profiler"] eks_version = "1.30" -eks_node_release_version = "1.29.0-20240202" #release_version commented in main.tf +# eks_node_release_version = "1.29.0-20240202" ebs_csi_addon_version = "v1.27.0-eksbuild.1" # apps_node_group_min_size = 1 # apps_node_group_max_size = 16 From ad8fd34ebc1588ca9249312efea6e1c14596d6d2 Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Mon, 12 Jan 2026 16:15:12 -0300 Subject: [PATCH 08/47] set ebs_csi_addon_version as null --- terraform/variables.tf | 3 ++- terraform/vars/terraform-dev.tfvars | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/terraform/variables.tf b/terraform/variables.tf index f86ccdc..67eae6f 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -232,12 +232,13 @@ variable "eks_version" { variable "eks_node_release_version" { type = string - default = "" + default = "" description = "Version of EKS (kubernetes) node AMI to deploy" } variable "ebs_csi_addon_version" { type = string + default = null description = "Version of AWS EBS CRI driver to use" } diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index 4d68129..edeaf7b 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -12,7 
+12,7 @@ db_instance_count = 1 db_logs_exports = ["audit", "profiler"] eks_version = "1.30" # eks_node_release_version = "1.29.0-20240202" -ebs_csi_addon_version = "v1.27.0-eksbuild.1" +# ebs_csi_addon_version = "v1.27.0-eksbuild.1" # apps_node_group_min_size = 1 # apps_node_group_max_size = 16 # apps_node_group_desired_size = 3 From 55a1bbf84b3959080bbc1d6a6fa05dfcebb6ab49 Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Mon, 12 Jan 2026 16:21:56 -0300 Subject: [PATCH 09/47] update ebs_csi_addon_version variable to v1.30.0-eksbuild.1 to solve upgrade alert on AWS console --- terraform/variables.tf | 1 - terraform/vars/terraform-dev.tfvars | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/terraform/variables.tf b/terraform/variables.tf index 67eae6f..72ea704 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -238,7 +238,6 @@ variable "eks_node_release_version" { variable "ebs_csi_addon_version" { type = string - default = null description = "Version of AWS EBS CRI driver to use" } diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index edeaf7b..ae816f9 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -12,7 +12,7 @@ db_instance_count = 1 db_logs_exports = ["audit", "profiler"] eks_version = "1.30" # eks_node_release_version = "1.29.0-20240202" -# ebs_csi_addon_version = "v1.27.0-eksbuild.1" +ebs_csi_addon_version = "v1.30.0-eksbuild.1" # apps_node_group_min_size = 1 # apps_node_group_max_size = 16 # apps_node_group_desired_size = 3 From 1e5a6b6558d856433f34850cee44c07a027e8dff Mon Sep 17 00:00:00 2001 From: Anaue Curi Date: Mon, 12 Jan 2026 19:03:58 -0300 Subject: [PATCH 10/47] add kube-proxy as add-on compatible with kubernetes 1.30 --- terraform/main.tf | 1 + terraform/modules/eks/main.tf | 8 +++++++- terraform/modules/eks/variable.tf | 7 ++++++- terraform/variables.tf | 5 +++++ terraform/vars/terraform-dev.tfvars | 1 + 5 files changed, 20 
insertions(+), 2 deletions(-) diff --git a/terraform/main.tf b/terraform/main.tf index 1fc288e..fa7947f 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -54,6 +54,7 @@ module "eks" { eks_version = var.eks_version aws_region = var.aws_region ebs_csi_addon_version = var.ebs_csi_addon_version + kube_proxy_addon_version = var.kube_proxy_addon_version admin_role_arns = data.aws_iam_roles.admin_arn.arns subnet_ids = [ module.vpc.private_subnets[0].id, diff --git a/terraform/modules/eks/main.tf b/terraform/modules/eks/main.tf index 35f6935..c9d0d1a 100644 --- a/terraform/modules/eks/main.tf +++ b/terraform/modules/eks/main.tf @@ -44,6 +44,12 @@ resource "aws_eks_addon" "aws_ebs_csi_driver" { service_account_role_arn = aws_iam_role.ebs_csi_iam_role.arn } +resource "aws_eks_addon" "kube_proxy" { + cluster_name = aws_eks_cluster.eks_cluster.name + addon_name = "kube-proxy" + addon_version = var.kube_proxy_addon_version +} + resource "aws_security_group" "eks_cluster_security_group" { name = "${replace(var.project, " ", "-")}eks-cluster-security-group" description = "Cluster communication with worker nodes" @@ -279,4 +285,4 @@ resource "aws_eks_access_policy_association" "admin_policy" { depends_on = [ aws_eks_access_entry.admin_role ] -} \ No newline at end of file +} diff --git a/terraform/modules/eks/variable.tf b/terraform/modules/eks/variable.tf index a9a8259..5de88da 100644 --- a/terraform/modules/eks/variable.tf +++ b/terraform/modules/eks/variable.tf @@ -30,6 +30,11 @@ variable "ebs_csi_addon_version" { description = "Version of AWS EBS CRI driver to use" } +variable "kube_proxy_addon_version" { + type = string + description = "Version of kube-proxy to use" +} + variable "backups_bucket" { type = string description = "S3 bucket to which backups will be performed" @@ -38,4 +43,4 @@ variable "backups_bucket" { variable "admin_role_arns" { type = set(string) description = "ARN of the Role used for admin cluster access." 
-} \ No newline at end of file +} diff --git a/terraform/variables.tf b/terraform/variables.tf index 72ea704..6e2d9e4 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -241,6 +241,11 @@ variable "ebs_csi_addon_version" { description = "Version of AWS EBS CRI driver to use" } +variable "kube_proxy_addon_version" { + type = string + description = "Version of kube-proxy to use" +} + variable "hibernate" { description = "If set to true, the EKS cluster will be scaled down and its services unavailable" type = bool diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index ae816f9..2a605ff 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -13,6 +13,7 @@ db_logs_exports = ["audit", "profiler"] eks_version = "1.30" # eks_node_release_version = "1.29.0-20240202" ebs_csi_addon_version = "v1.30.0-eksbuild.1" +kube_proxy_addon_version = "v1.30.14-eksbuild.18" # apps_node_group_min_size = 1 # apps_node_group_max_size = 16 # apps_node_group_desired_size = 3 From 2b6fa3f9ca0cb6a9b9fd297b97027e40a1753052 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 23 Jan 2026 09:13:14 -0500 Subject: [PATCH 11/47] Refresh TF state. 
--- .../.terraform.lock.hcl | 26 +++++-------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/terraform-k8s-infrastructure/.terraform.lock.hcl b/terraform-k8s-infrastructure/.terraform.lock.hcl index 785e9bf..80d3d94 100644 --- a/terraform-k8s-infrastructure/.terraform.lock.hcl +++ b/terraform-k8s-infrastructure/.terraform.lock.hcl @@ -6,6 +6,7 @@ provider "registry.terraform.io/cloudflare/cloudflare" { constraints = "~> 3.30.0" hashes = [ "h1:VoKtn0DsmPCwOWirDIzxhjAnLKlHy4WFcYjtcg/FG/U=", + "h1:h+FHHosP01pKaH0Tn5+pId3IT6vBE6uHQxvEtNeL/KY=", "zh:1f1a09c954f21fc4665292b898db8c12c8b2083cfb02fb4fffa3b9db1df5a789", "zh:3212a58a15d69ba0781f4d60290164d8690f831d9f8b8d35c21e6616620e0cb0", "zh:574af296091adb2c109547f5ef919bae90a0ef72a86894e40d93304551b5b176", @@ -28,6 +29,7 @@ provider "registry.terraform.io/gavinbunney/kubectl" { constraints = "~> 1.14.0" hashes = [ "h1:gLFn+RvP37sVzp9qnFCwngRjjFV649r6apjxvJ1E/SE=", + "h1:mX2AOFIMIxJmW5kM8DT51gloIOKCr9iT6W8yodnUyfs=", "zh:0350f3122ff711984bbc36f6093c1fe19043173fad5a904bce27f86afe3cc858", "zh:07ca36c7aa7533e8325b38232c77c04d6ef1081cb0bac9d56e8ccd51f12f2030", "zh:0c351afd91d9e994a71fe64bbd1662d0024006b3493bb61d46c23ea3e42a7cf5", @@ -40,30 +42,12 @@ provider "registry.terraform.io/gavinbunney/kubectl" { ] } -provider "registry.terraform.io/hashicorp/archive" { - version = "2.4.0" - hashes = [ - "h1:EtN1lnoHoov3rASpgGmh6zZ/W6aRCTgKC7iMwvFY1yc=", - "zh:18e408596dd53048f7fc8229098d0e3ad940b92036a24287eff63e2caec72594", - "zh:392d4216ecd1a1fd933d23f4486b642a8480f934c13e2cae3c13b6b6a7e34a7b", - "zh:655dd1fa5ca753a4ace21d0de3792d96fff429445717f2ce31c125d19c38f3ff", - "zh:70dae36c176aa2b258331ad366a471176417a94dd3b4985a911b8be9ff842b00", - "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:7d8c8e3925f1e21daf73f85983894fbe8868e326910e6df3720265bc657b9c9c", - "zh:a032ec0f0aee27a789726e348e8ad20778c3a1c9190ef25e7cff602c8d175f44", - 
"zh:b8e50de62ba185745b0fe9713755079ad0e9f7ac8638d204de6762cc36870410", - "zh:c8ad0c7697a3d444df21ff97f3473a8604c8639be64afe3f31b8ec7ad7571e18", - "zh:df736c5a2a7c3a82c5493665f659437a22f0baf8c2d157e45f4dd7ca40e739fc", - "zh:e8ffbf578a0977074f6d08aa8734e36c726e53dc79894cfc4f25fadc4f45f1df", - "zh:efea57ff23b141551f92b2699024d356c7ffd1a4ad62931da7ed7a386aef7f1f", - ] -} - provider "registry.terraform.io/hashicorp/aws" { version = "4.48.0" constraints = "~> 4.48.0" hashes = [ "h1:Fz26mWZmM9syrY91aPeTdd3hXG4DvMR81ylWC9xE2uA=", + "h1:t/R3B4mibkp2zLer4MfhFbwHAVLAq71mJz4nwdUydBE=", "zh:08f5e3c5256a4fbd5c988863d10e5279172b2470fec6d4fb13c372663e7f7cac", "zh:2a04376b7fa84681bd2938973c7d0822c8c0f0656a4e7661a2f50ac4d852d4a3", "zh:30d6cdf321aaba874934cbde505333d89d172d8d5ffcf40b6e66626c57bc6ab2", @@ -86,6 +70,7 @@ provider "registry.terraform.io/hashicorp/helm" { version = "2.8.0" constraints = "~> 2.8.0" hashes = [ + "h1:U0w0mUT0SwZCR0poGNSxGaZJKWcOiu4GerpGztYBiMM=", "h1:abRryu69lsIGXctqjMVoaKqi74eE12Vzd2FLpds1/PI=", "zh:1e42d1a04c07d4006844e477ca32b5f45b04f6525dbbbe00b6be6e6ec5a11c54", "zh:2f87187cb48ccfb18d12e2c4332e7e822923b659e7339b954b7db78aff91529f", @@ -106,6 +91,7 @@ provider "registry.terraform.io/hashicorp/kubernetes" { version = "2.16.1" constraints = "~> 2.16.1" hashes = [ + "h1:PO4Ye/+lu5hCaUEOtwNOldQYoA0dqL1bcBICIpdlcd8=", "h1:i+DwtJK82sIWmTcQA9lL0mlET+14/QpUqv10fU2o3As=", "zh:06224975f5910d41e73b35a4d5079861da2c24f9353e3ebb015fbb3b3b996b1c", "zh:2bc400a8d9fe7755cca27c2551564a9e2609cfadc77f526ef855114ee02d446f", @@ -126,6 +112,7 @@ provider "registry.terraform.io/hashicorp/null" { version = "3.2.1" hashes = [ "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=", + "h1:tSj1mL6OQ8ILGqR2mDu7OYYYWf+hoir0pf9KAQ8IzO8=", "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840", "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb", "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5", @@ -144,6 +131,7 @@ provider 
"registry.terraform.io/hashicorp/null" { provider "registry.terraform.io/hashicorp/template" { version = "2.2.0" hashes = [ + "h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=", "h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=", "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386", "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53", From 8144fec9cd74ee091ff6c7ce24f18f61ee2fe78d Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 6 Feb 2026 16:13:47 -0500 Subject: [PATCH 12/47] Adds access entry for GHA OIDC role. --- terraform/main.tf | 1 + terraform/modules/eks/main.tf | 20 ++++++++++++++++++++ terraform/modules/eks/variable.tf | 5 +++++ terraform/variables.tf | 5 +++++ terraform/vars/terraform-dev.tfvars | 1 + 5 files changed, 32 insertions(+) diff --git a/terraform/main.tf b/terraform/main.tf index fa7947f..67d9f02 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -56,6 +56,7 @@ module "eks" { ebs_csi_addon_version = var.ebs_csi_addon_version kube_proxy_addon_version = var.kube_proxy_addon_version admin_role_arns = data.aws_iam_roles.admin_arn.arns + gha_role_arn = var.gha_role_arn subnet_ids = [ module.vpc.private_subnets[0].id, module.vpc.private_subnets[1].id, diff --git a/terraform/modules/eks/main.tf b/terraform/modules/eks/main.tf index c9d0d1a..8e0f701 100644 --- a/terraform/modules/eks/main.tf +++ b/terraform/modules/eks/main.tf @@ -286,3 +286,23 @@ resource "aws_eks_access_policy_association" "admin_policy" { aws_eks_access_entry.admin_role ] } + +resource "aws_eks_access_entry" "gha_role" { + cluster_name = aws_eks_cluster.eks_cluster.name + principal_arn = var.gha_role_arn + type = "STANDARD" +} + +resource "aws_eks_access_policy_association" "gha_policy" { + cluster_name = aws_eks_cluster.eks_cluster.name + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + principal_arn = var.gha_role_arn + + access_scope { + type = "cluster" + } + + depends_on = [ + 
aws_eks_access_entry.gha_role + ] +} \ No newline at end of file diff --git a/terraform/modules/eks/variable.tf b/terraform/modules/eks/variable.tf index 5de88da..109db78 100644 --- a/terraform/modules/eks/variable.tf +++ b/terraform/modules/eks/variable.tf @@ -44,3 +44,8 @@ variable "admin_role_arns" { type = set(string) description = "ARN of the Role used for admin cluster access." } + +variable "gha_role_arn" { + type = string + description = "ARN of the Role used for Github Actions." +} diff --git a/terraform/variables.tf b/terraform/variables.tf index 6e2d9e4..48228a9 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -283,3 +283,8 @@ variable "email_recipients" { description = "List of email addresses to contact in case an alert fails" default = [] } + +variable "gha_role_arn" { + type = string + description = "ARN of the Role used for Github Actions." +} diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index 2a605ff..8aa3717 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -26,3 +26,4 @@ gateway_node_group_desired_size = 0 hibernate = false aq_bucket_cors_allowed_origin = "*" deploy_sparkpost_templates = false +gha_role_arn = "arn:aws:iam::842534099497:role/wri-api-dev-githubactions-role" \ No newline at end of file From 3b3833c3bc1b6c6986889bc2c1e1fcf5ca5fe17e Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Mon, 26 Jan 2026 14:19:18 -0500 Subject: [PATCH 13/47] Add TF Plan to GHA for k8s, and fix cloudflare keys. Remove unneeded Sparkpost variable. 
--- .github/workflows/terraform_plan.yaml | 35 ++++++++++++++++++- .../.terraform.lock.hcl | 27 +++++++------- terraform-k8s-infrastructure/main.tf | 12 ++++--- .../modules/k8s_infrastructure/variable.tf | 10 ++++++ .../modules/k8s_infrastructure/versions.tf | 10 ++++++ .../k8s_microservice_routing/variables.tf | 10 ++++++ .../k8s_microservice_routing/versions.tf | 5 +++ terraform-k8s-infrastructure/variables.tf | 17 +++++++++ terraform-k8s-infrastructure/versions.tf | 10 ++++++ 9 files changed, 117 insertions(+), 19 deletions(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 11565d0..2672f76 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -5,7 +5,7 @@ on: branches: [dev, staging] jobs: - plan: + plan-infra: runs-on: ubuntu-latest env: @@ -38,3 +38,36 @@ jobs: -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" + + plan-k8s: + runs-on: ubuntu-latest + + env: + ENV: ${{ github.base_ref }} + AWS_ACCESS_KEY_ID: >- + ${{ github.base_ref == 'production' && secrets.aws_key_production || + github.base_ref == 'staging' && secrets.aws_key_staging || + secrets.aws_key_dev }} + AWS_SECRET_ACCESS_KEY: >- + ${{ github.base_ref == 'production' && secrets.aws_secret_production || + github.base_ref == 'staging' && secrets.aws_secret_staging || + secrets.aws_secret_dev }} + AWS_REGION: >- + ${{ github.base_ref == 'production' && secrets.aws_region_production || + github.base_ref == 'staging' && secrets.aws_region_staging || + secrets.aws_region_dev }} + TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} + TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} + + steps: + - uses: actions/checkout@v1 + + - name: TF Init + run: ./scripts/infra -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars + + - name: TF Plan + run: | + 
./scripts/infra -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ + -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ + -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ + -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" diff --git a/terraform-k8s-infrastructure/.terraform.lock.hcl b/terraform-k8s-infrastructure/.terraform.lock.hcl index 80d3d94..cc9b02c 100644 --- a/terraform-k8s-infrastructure/.terraform.lock.hcl +++ b/terraform-k8s-infrastructure/.terraform.lock.hcl @@ -109,22 +109,21 @@ provider "registry.terraform.io/hashicorp/kubernetes" { } provider "registry.terraform.io/hashicorp/null" { - version = "3.2.1" + version = "3.2.4" hashes = [ - "h1:FbGfc+muBsC17Ohy5g806iuI1hQc4SIexpYCrQHQd8w=", - "h1:tSj1mL6OQ8ILGqR2mDu7OYYYWf+hoir0pf9KAQ8IzO8=", - "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840", - "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb", - "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5", - "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3", + "h1:hkf5w5B6q8e2A42ND2CjAvgvSN3puAosDmOJb3zCVQM=", + "zh:59f6b52ab4ff35739647f9509ee6d93d7c032985d9f8c6237d1f8a59471bbbe2", "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", - "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238", - "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc", - "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970", - "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2", - "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5", - "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f", - "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694", + "zh:795c897119ff082133150121d39ff26cb5f89a730a2c8c26f3a9c1abf81a9c43", + "zh:7b9c7b16f118fbc2b05a983817b8ce2f86df125857966ad356353baf4bff5c0a", + 
"zh:85e33ab43e0e1726e5f97a874b8e24820b6565ff8076523cc2922ba671492991", + "zh:9d32ac3619cfc93eb3c4f423492a8e0f79db05fec58e449dee9b2d5873d5f69f", + "zh:9e15c3c9dd8e0d1e3731841d44c34571b6c97f5b95e8296a45318b94e5287a6e", + "zh:b4c2ab35d1b7696c30b64bf2c0f3a62329107bd1a9121ce70683dec58af19615", + "zh:c43723e8cc65bcdf5e0c92581dcbbdcbdcf18b8d2037406a5f2033b1e22de442", + "zh:ceb5495d9c31bfb299d246ab333f08c7fb0d67a4f82681fbf47f2a21c3e11ab5", + "zh:e171026b3659305c558d9804062762d168f50ba02b88b231d20ec99578a6233f", + "zh:ed0fe2acdb61330b01841fa790be00ec6beaac91d41f311fb8254f74eb6a711f", ] } diff --git a/terraform-k8s-infrastructure/main.tf b/terraform-k8s-infrastructure/main.tf index 1c85f00..1e68a42 100644 --- a/terraform-k8s-infrastructure/main.tf +++ b/terraform-k8s-infrastructure/main.tf @@ -18,17 +18,19 @@ data "aws_vpc" "eks_vpc" { module "k8s_infrastructure" { source = "./modules/k8s_infrastructure" - cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:4433" + cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" cluster_ca = data.aws_eks_cluster.rw_api.certificate_authority.0.data cluster_name = data.aws_eks_cluster.rw_api.name aws_region = var.aws_region vpc_id = data.aws_vpc.eks_vpc.id deploy_metrics_server = var.deploy_metrics_server + cloudflare_api_key = var.cloudflare_api_key + cloudflare_email = var.cloudflare_email } module "k8s_data_layer" { source = "./modules/k8s_data_layer" - cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:4433" + cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" cluster_ca = data.aws_eks_cluster.rw_api.certificate_authority.0.data cluster_name = data.aws_eks_cluster.rw_api.name aws_region = var.aws_region @@ -45,18 +47,20 @@ module "k8s_microservice_routing" { environment = var.environment dns_prefix = var.dns_prefix vpc = data.aws_vpc.eks_vpc - cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:4433" + cluster_endpoint = 
"${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" cluster_ca = data.aws_eks_cluster.rw_api.certificate_authority.0.data cluster_name = data.aws_eks_cluster.rw_api.name tf_core_state_bucket = var.tf_core_state_bucket x_rw_domain = var.x_rw_domain fw_backend_url = var.fw_backend_url require_api_key = var.require_api_key + cloudflare_api_key = var.cloudflare_api_key + cloudflare_email = var.cloudflare_email } module "k8s_namespaces" { source = "./modules/k8s_namespaces" - cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:4433" + cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" cluster_ca = data.aws_eks_cluster.rw_api.certificate_authority.0.data cluster_name = data.aws_eks_cluster.rw_api.name kubectl_context = "aws-rw-${var.environment}" diff --git a/terraform-k8s-infrastructure/modules/k8s_infrastructure/variable.tf b/terraform-k8s-infrastructure/modules/k8s_infrastructure/variable.tf index 03672f3..c18a6b6 100644 --- a/terraform-k8s-infrastructure/modules/k8s_infrastructure/variable.tf +++ b/terraform-k8s-infrastructure/modules/k8s_infrastructure/variable.tf @@ -28,3 +28,13 @@ variable "deploy_metrics_server" { type = bool description = "If AWS Metrics server should be deployed" } + +variable "cloudflare_api_key" { + type = string + description = "Cloudflare API key" +} + +variable "cloudflare_email" { + type = string + description = "Cloudflare email" +} diff --git a/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf b/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf index d928a93..f035d31 100644 --- a/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf @@ -14,6 +14,11 @@ terraform { source = "hashicorp/helm" version = "~> 2.8.0" } + + cloudflare = { + source = "cloudflare/cloudflare" + version = "~> 3.30.0" + } } required_version = "1.3.6" } @@ -22,6 +27,11 @@ provider "aws" { region = 
var.aws_region } +provider "cloudflare" { + api_key = var.cloudflare_api_key + email = var.cloudflare_email +} + provider "helm" { kubernetes { host = var.cluster_endpoint diff --git a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/variables.tf b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/variables.tf index aea05bf..0bbe06d 100644 --- a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/variables.tf +++ b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/variables.tf @@ -66,3 +66,13 @@ variable "require_api_key" { type = bool default = false } + +variable "cloudflare_api_key" { + type = string + description = "Cloudflare API key" +} + +variable "cloudflare_email" { + type = string + description = "Cloudflare email" +} \ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf index 109bf07..70710e3 100644 --- a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf @@ -15,3 +15,8 @@ terraform { } required_version = "1.3.6" } + +provider "cloudflare" { + api_key = var.cloudflare_api_key + email = var.cloudflare_email +} \ No newline at end of file diff --git a/terraform-k8s-infrastructure/variables.tf b/terraform-k8s-infrastructure/variables.tf index 0b56188..5996515 100644 --- a/terraform-k8s-infrastructure/variables.tf +++ b/terraform-k8s-infrastructure/variables.tf @@ -15,6 +15,12 @@ variable "aws_region" { description = "A valid AWS region to configure the underlying AWS SDK." } +variable "cluster_port" { + type = string + description = "THe k8s cluster port, if different from 443." + default = "443" +} + variable "dns_prefix" { type = string description = "DNS prefix for public URLs created in this project." 
@@ -87,3 +93,14 @@ variable "fw_backend_url" { variable "require_api_key" { type = bool } + +variable "cloudflare_api_key" { + type = string + description = "Cloudflare API key" +} + +variable "cloudflare_email" { + type = string + description = "Cloudflare email" +} + diff --git a/terraform-k8s-infrastructure/versions.tf b/terraform-k8s-infrastructure/versions.tf index a9453dc..9c10bd8 100644 --- a/terraform-k8s-infrastructure/versions.tf +++ b/terraform-k8s-infrastructure/versions.tf @@ -4,6 +4,11 @@ terraform { source = "hashicorp/aws" version = "~> 4.48.0" } + + cloudflare = { + source = "cloudflare/cloudflare" + version = "~> 3.30.0" + } } required_version = "1.3.6" } @@ -11,3 +16,8 @@ terraform { provider "aws" { region = var.aws_region } + +provider "cloudflare" { + api_key = var.cloudflare_api_key + email = var.cloudflare_email +} \ No newline at end of file From 339011a885884393021c1bd0024427cb87c72368 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Mon, 26 Jan 2026 14:23:15 -0500 Subject: [PATCH 14/47] Add steps to existing job in order to ensure sequential run. 
--- .github/workflows/terraform_plan.yaml | 31 ++++----------------------- 1 file changed, 4 insertions(+), 27 deletions(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 2672f76..064014e 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -5,7 +5,7 @@ on: branches: [dev, staging] jobs: - plan-infra: + plan: runs-on: ubuntu-latest env: @@ -38,30 +38,7 @@ jobs: -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" - - plan-k8s: - runs-on: ubuntu-latest - - env: - ENV: ${{ github.base_ref }} - AWS_ACCESS_KEY_ID: >- - ${{ github.base_ref == 'production' && secrets.aws_key_production || - github.base_ref == 'staging' && secrets.aws_key_staging || - secrets.aws_key_dev }} - AWS_SECRET_ACCESS_KEY: >- - ${{ github.base_ref == 'production' && secrets.aws_secret_production || - github.base_ref == 'staging' && secrets.aws_secret_staging || - secrets.aws_secret_dev }} - AWS_REGION: >- - ${{ github.base_ref == 'production' && secrets.aws_region_production || - github.base_ref == 'staging' && secrets.aws_region_staging || - secrets.aws_region_dev }} - TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} - TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} - - steps: - - uses: actions/checkout@v1 - + - name: TF Init run: ./scripts/infra -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars @@ -69,5 +46,5 @@ jobs: run: | ./scripts/infra -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" + -var "cloudflare_email=${TF_VAR_cloudflare_email}" + From f768b693b9ac66250f36bdcb95e9c09071d35cef Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Mon, 26 
Jan 2026 14:34:10 -0500 Subject: [PATCH 15/47] Ensure AWS cli is installed. Fix indentation. --- .github/workflows/terraform_plan.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 064014e..22a2be9 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -29,6 +29,11 @@ jobs: steps: - uses: actions/checkout@v1 + - name: Install AWS CLI + run: | + sudo apt-get update + sudo apt-get install -y awscli + - name: TF Init run: ./scripts/infra -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars From 63fb1772bd67276043ec5240914fd8dbd316fe6f Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 27 Jan 2026 16:26:04 -0500 Subject: [PATCH 16/47] Build custom image with AWS CLI installed. Try without docker --- .github/workflows/terraform_plan.yaml | 20 +++++++++++--------- terraform/docker/Dockerfile | 8 ++++++++ terraform/docker/docker-compose.yml | 3 ++- 3 files changed, 21 insertions(+), 10 deletions(-) create mode 100644 terraform/docker/Dockerfile diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 22a2be9..51bcb41 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -27,29 +27,31 @@ jobs: TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} steps: - - uses: actions/checkout@v1 + - name: Checkout repository + uses: actions/checkout@v4 - - name: Install AWS CLI - run: | - sudo apt-get update - sudo apt-get install -y awscli + - name: Setup terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.3.6 - name: TF Init - run: ./scripts/infra -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars + #run: ./scripts/infra -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars + run: terraform -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars - name: TF Plan run: | - ./scripts/infra 
-chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ + terraform -chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" - name: TF Init - run: ./scripts/infra -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars + run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars - name: TF Plan run: | - ./scripts/infra -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ + terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" diff --git a/terraform/docker/Dockerfile b/terraform/docker/Dockerfile new file mode 100644 index 0000000..3bf3886 --- /dev/null +++ b/terraform/docker/Dockerfile @@ -0,0 +1,8 @@ +FROM hashicorp/terraform:1.3.6 + +RUN apk add --upgrade --no-cache curl unzip \ + && curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o /tmp/awscliv2.zip \ + && unzip /tmp/awscliv2.zip -d /tmp \ + && /tmp/aws/install \ + && rm -rf /tmp/awscliv2.zip /tmp/aws \ + && hash aws diff --git a/terraform/docker/docker-compose.yml b/terraform/docker/docker-compose.yml index 47419f9..c9caaff 100644 --- a/terraform/docker/docker-compose.yml +++ b/terraform/docker/docker-compose.yml @@ -1,6 +1,7 @@ services: terraform: - image: hashicorp/terraform:1.3.6 + #image: hashicorp/terraform:1.3.6 + build: . volumes: - ../../:/usr/local/src - $HOME/.aws:/root/.aws:ro From ff91388a9168a09974036a6dae04f95a247b0bd3 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 27 Jan 2026 17:04:58 -0500 Subject: [PATCH 17/47] Add kubeconfig setup. Switch all k8s providers to using a token generated up front. Missed one exec. Trying to fix helm/k8s auth. 
Cleanup of provider configurations. --- .github/workflows/terraform_plan.yaml | 3 + .../modules/k8s_data_layer/main.tf | 3 - .../modules/k8s_data_layer/versions.tf | 39 ------------- .../alb_ingress/versions.tf | 6 +- .../modules/k8s_infrastructure/main.tf | 11 ---- .../modules/k8s_infrastructure/versions.tf | 42 +++----------- .../modules/k8s_microservice_routing/main.tf | 10 ---- .../k8s_microservice_routing/versions.tf | 17 +++++- .../modules/k8s_namespaces/versions.tf | 10 +--- terraform-k8s-infrastructure/versions.tf | 56 +++++++++++++++++++ 10 files changed, 83 insertions(+), 114 deletions(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 51bcb41..0ee0365 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -49,6 +49,9 @@ jobs: - name: TF Init run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars + - name: Configure Kubeconfig + run: aws eks update-kubeconfig --region us-east-1 --name core-k8s-cluster-$ENV + - name: TF Plan run: | terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/main.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/main.tf index b1fe765..e69de29 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/main.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/main.tf @@ -1,3 +0,0 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} \ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/versions.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/versions.tf index b303265..4f6ac2a 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/versions.tf @@ -17,42 +17,3 @@ terraform { } required_version = "1.3.6" } - -provider "aws" { - region = 
var.aws_region -} - -provider "kubernetes" { - host = var.cluster_endpoint - config_path = "~/.kube/config" - cluster_ca_certificate = base64decode(var.cluster_ca) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", var.cluster_name] - command = "aws" - } -} - -provider "kubectl" { - host = var.cluster_endpoint - cluster_ca_certificate = base64decode(var.cluster_ca) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false -} - -provider "helm" { - kubernetes { - host = var.cluster_endpoint - cluster_ca_certificate = base64decode(var.cluster_ca) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = [ - "eks", - "get-token", - "--cluster-name", - var.cluster_name - ] - command = "aws" - } - } -} diff --git a/terraform-k8s-infrastructure/modules/k8s_infrastructure/alb_ingress/versions.tf b/terraform-k8s-infrastructure/modules/k8s_infrastructure/alb_ingress/versions.tf index 48161ee..a832e6f 100644 --- a/terraform-k8s-infrastructure/modules/k8s_infrastructure/alb_ingress/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_infrastructure/alb_ingress/versions.tf @@ -10,8 +10,4 @@ terraform { } } required_version = "~> 1.3.2" -} - -provider "kubernetes" { - config_path = "~/.kube/config" -} +} \ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_infrastructure/main.tf b/terraform-k8s-infrastructure/modules/k8s_infrastructure/main.tf index 55fcd73..0a6a766 100644 --- a/terraform-k8s-infrastructure/modules/k8s_infrastructure/main.tf +++ b/terraform-k8s-infrastructure/modules/k8s_infrastructure/main.tf @@ -1,14 +1,3 @@ -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} - -provider "kubectl" { - host = var.cluster_endpoint - cluster_ca_certificate = base64decode(var.cluster_ca) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false -} - #// 
https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html #// ALB Ingress Controller module "alb" { diff --git a/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf b/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf index f035d31..3667555 100644 --- a/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_infrastructure/versions.tf @@ -5,46 +5,20 @@ terraform { version = "~> 4.48.0" } - kubectl = { - source = "gavinbunney/kubectl" - version = "~> 1.14.0" - } - - helm = { - source = "hashicorp/helm" - version = "~> 2.8.0" - } - cloudflare = { source = "cloudflare/cloudflare" version = "~> 3.30.0" } - } - required_version = "1.3.6" -} - -provider "aws" { - region = var.aws_region -} -provider "cloudflare" { - api_key = var.cloudflare_api_key - email = var.cloudflare_email -} + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.16.1" + } -provider "helm" { - kubernetes { - host = var.cluster_endpoint - cluster_ca_certificate = base64decode(var.cluster_ca) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = [ - "eks", - "get-token", - "--cluster-name", - var.cluster_name - ] - command = "aws" + kubectl = { + source = "gavinbunney/kubectl" + version = "~> 1.14.0" } } + required_version = "1.3.6" } diff --git a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/main.tf b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/main.tf index e06c951..078efbb 100644 --- a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/main.tf +++ b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/main.tf @@ -8,16 +8,6 @@ data "terraform_remote_state" "core" { } } -provider "kubernetes" { - host = var.cluster_endpoint - cluster_ca_certificate = base64decode(var.cluster_ca) - exec { - api_version = "client.authentication.k8s.io/v1beta1" - args = ["eks", "get-token", "--cluster-name", var.cluster_name] - command 
= "aws" - } -} - # # Base API Gateway setup # diff --git a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf index 70710e3..db7a708 100644 --- a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf @@ -16,7 +16,18 @@ terraform { required_version = "1.3.6" } -provider "cloudflare" { - api_key = var.cloudflare_api_key - email = var.cloudflare_email +data "aws_eks_cluster_auth" "cluster" { + name = var.cluster_name +} + +provider "kubernetes" { + host = var.cluster_endpoint + #config_path = "~/.kube/config" + cluster_ca_certificate = base64decode(var.cluster_ca) + token = data.aws_eks_cluster_auth.cluster.token + #exec { + # api_version = "client.authentication.k8s.io/v1beta1" + # args = ["eks", "get-token", "--cluster-name", var.cluster_name] + # command = "aws" + #} } \ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_namespaces/versions.tf b/terraform-k8s-infrastructure/modules/k8s_namespaces/versions.tf index c28252f..44a878b 100644 --- a/terraform-k8s-infrastructure/modules/k8s_namespaces/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_namespaces/versions.tf @@ -10,12 +10,4 @@ terraform { } } required_version = "1.3.6" -} - -provider "aws" { - region = var.aws_region -} - -provider "kubernetes" { - config_path = "~/.kube/config" -} +} \ No newline at end of file diff --git a/terraform-k8s-infrastructure/versions.tf b/terraform-k8s-infrastructure/versions.tf index 9c10bd8..459abbe 100644 --- a/terraform-k8s-infrastructure/versions.tf +++ b/terraform-k8s-infrastructure/versions.tf @@ -9,6 +9,21 @@ terraform { source = "cloudflare/cloudflare" version = "~> 3.30.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 2.16.1" + } + + kubectl = { + source = "gavinbunney/kubectl" + version = "~> 1.14.0" + } + + 
helm = { + source = "hashicorp/helm" + version = "~> 2.8.0" + } } required_version = "1.3.6" } @@ -20,4 +35,45 @@ provider "aws" { provider "cloudflare" { api_key = var.cloudflare_api_key email = var.cloudflare_email +} + +data "aws_eks_cluster_auth" "cluster" { + name = data.aws_eks_cluster.rw_api.name +} + +provider "kubernetes" { + host = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" + #config_path = "~/.kube/config" + cluster_ca_certificate = base64decode(data.aws_eks_cluster.rw_api.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.cluster.token + #exec { + # api_version = "client.authentication.k8s.io/v1beta1" + # args = ["eks", "get-token", "--cluster-name", var.cluster_name] + # command = "aws" + #} +} + +provider "kubectl" { + host = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" + cluster_ca_certificate = base64decode(data.aws_eks_cluster.rw_api.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.cluster.token + load_config_file = false +} + +provider "helm" { + kubernetes { + host = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" + cluster_ca_certificate = base64decode(data.aws_eks_cluster.rw_api.certificate_authority.0.data) + token = data.aws_eks_cluster_auth.cluster.token + #exec { + # api_version = "client.authentication.k8s.io/v1beta1" + # args = [ + # "eks", + # "get-token", + # "--cluster-name", + # var.cluster_name + # ] + # command = "aws" + #} + } } \ No newline at end of file From 1ef10851572246d0e2a32771461d030d49b8694a Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 5 Feb 2026 15:26:41 -0500 Subject: [PATCH 18/47] Run each module separately to avoid token timeout Trying kube config instead of token. Try init after kube config. Try excluding Data Layer. Oops, use target instead. 
--- .github/workflows/terraform_plan.yaml | 37 +++++++++++++++---- terraform-k8s-infrastructure/main.tf | 18 ++++----- .../k8s_microservice_routing/versions.tf | 10 ++--- terraform-k8s-infrastructure/versions.tf | 18 +++++---- 4 files changed, 54 insertions(+), 29 deletions(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 0ee0365..2f32564 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -35,26 +35,49 @@ jobs: with: terraform_version: 1.3.6 - - name: TF Init + - name: TF Init EKS Cluster #run: ./scripts/infra -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars run: terraform -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars - - name: TF Plan + - name: TF Plan EKS Cluster run: | terraform -chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" - - - name: TF Init - run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars - name: Configure Kubeconfig run: aws eks update-kubeconfig --region us-east-1 --name core-k8s-cluster-$ENV - - name: TF Plan + - name: TF Init K8s Infra + run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars + + - name: TF Plan K8s Infra run: | terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" + -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ + -target module.k8s_namespaces + -target module.k8s_infrastructure + -target module.k8s_microservice_routing + +# - name: TF Plan Infrastructure +# run: | +# terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ +# -var 
"cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ +# -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ +# -target module.k8s_infrastructure + +# - name: TF Plan Data Layer +# run: | +# terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ +# -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ +# -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ +# -target module.k8s_data_layer +# - name: TF Plan MS Routing +# run: | +# terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ +# -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ +# -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ +# -target module.k8s_microservice_routing diff --git a/terraform-k8s-infrastructure/main.tf b/terraform-k8s-infrastructure/main.tf index 1e68a42..619e3a9 100644 --- a/terraform-k8s-infrastructure/main.tf +++ b/terraform-k8s-infrastructure/main.tf @@ -16,6 +16,15 @@ data "aws_vpc" "eks_vpc" { cidr_block = "10.0.0.0/16" } +module "k8s_namespaces" { + source = "./modules/k8s_namespaces" + cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" + cluster_ca = data.aws_eks_cluster.rw_api.certificate_authority.0.data + cluster_name = data.aws_eks_cluster.rw_api.name + kubectl_context = "aws-rw-${var.environment}" + namespaces = var.namespaces +} + module "k8s_infrastructure" { source = "./modules/k8s_infrastructure" cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" @@ -57,12 +66,3 @@ module "k8s_microservice_routing" { cloudflare_api_key = var.cloudflare_api_key cloudflare_email = var.cloudflare_email } - -module "k8s_namespaces" { - source = "./modules/k8s_namespaces" - cluster_endpoint = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" - cluster_ca = data.aws_eks_cluster.rw_api.certificate_authority.0.data - cluster_name = data.aws_eks_cluster.rw_api.name - kubectl_context = "aws-rw-${var.environment}" - namespaces = var.namespaces -} 
diff --git a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf index db7a708..7800cf5 100644 --- a/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf +++ b/terraform-k8s-infrastructure/modules/k8s_microservice_routing/versions.tf @@ -16,15 +16,15 @@ terraform { required_version = "1.3.6" } -data "aws_eks_cluster_auth" "cluster" { - name = var.cluster_name -} +#data "aws_eks_cluster_auth" "cluster" { +# name = var.cluster_name +#} provider "kubernetes" { host = var.cluster_endpoint - #config_path = "~/.kube/config" + config_path = "~/.kube/config" cluster_ca_certificate = base64decode(var.cluster_ca) - token = data.aws_eks_cluster_auth.cluster.token + #token = data.aws_eks_cluster_auth.cluster.token #exec { # api_version = "client.authentication.k8s.io/v1beta1" # args = ["eks", "get-token", "--cluster-name", var.cluster_name] diff --git a/terraform-k8s-infrastructure/versions.tf b/terraform-k8s-infrastructure/versions.tf index 459abbe..42b6fef 100644 --- a/terraform-k8s-infrastructure/versions.tf +++ b/terraform-k8s-infrastructure/versions.tf @@ -37,15 +37,15 @@ provider "cloudflare" { email = var.cloudflare_email } -data "aws_eks_cluster_auth" "cluster" { - name = data.aws_eks_cluster.rw_api.name -} +#data "aws_eks_cluster_auth" "cluster" { +# name = data.aws_eks_cluster.rw_api.name +#} provider "kubernetes" { host = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" - #config_path = "~/.kube/config" + config_path = "~/.kube/config" cluster_ca_certificate = base64decode(data.aws_eks_cluster.rw_api.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token + #token = data.aws_eks_cluster_auth.cluster.token #exec { # api_version = "client.authentication.k8s.io/v1beta1" # args = ["eks", "get-token", "--cluster-name", var.cluster_name] @@ -56,15 +56,17 @@ provider "kubernetes" { provider "kubectl" { host = 
"${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" cluster_ca_certificate = base64decode(data.aws_eks_cluster.rw_api.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false + #token = data.aws_eks_cluster_auth.cluster.token + load_config_file = true + config_path = "~/.kube/config" } provider "helm" { kubernetes { host = "${data.aws_eks_cluster.rw_api.endpoint}:${var.cluster_port}" cluster_ca_certificate = base64decode(data.aws_eks_cluster.rw_api.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token + #token = data.aws_eks_cluster_auth.cluster.token + config_path = "~/.kube/config" #exec { # api_version = "client.authentication.k8s.io/v1beta1" # args = [ From 29ea450b326814d409c6134e3b52d985dc7f65c1 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 6 Feb 2026 16:13:47 -0500 Subject: [PATCH 19/47] Adds access entry for GHA OIDC role. Try using OIDC role. Adds needed permission and job order. Fix env var.s Forgot to escape newlines. Remove targets. 
--- .github/workflows/terraform_plan.yaml | 43 +++++++++++++++++++++++---- terraform/main.tf | 1 + terraform/modules/eks/main.tf | 20 +++++++++++++ terraform/modules/eks/variable.tf | 5 ++++ terraform/variables.tf | 5 ++++ terraform/vars/terraform-dev.tfvars | 1 + 6 files changed, 70 insertions(+), 5 deletions(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 2f32564..83f4469 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -4,8 +4,11 @@ on: pull_request: branches: [dev, staging] +permissions: + id-token: write + jobs: - plan: + plan_eks_cluster: runs-on: ubuntu-latest env: @@ -46,6 +49,39 @@ jobs: -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" + plan_k8s_infra: + runs-on: ubuntu-latest + needs: plan_eks_cluster + + env: + ENV: ${{ github.base_ref }} + AWS_ROLE: >- + ${{ github.base_ref == 'production' && 'TBD' || + github.base_ref == 'staging' && 'TBD' || + 'arn:aws:iam::842534099497:role/wri-api-dev-githubactions-role' }} + AWS_REGION: >- + ${{ github.base_ref == 'production' && secrets.aws_region_production || + github.base_ref == 'staging' && secrets.aws_region_staging || + secrets.aws_region_dev }} + TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} + TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} + TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@main + with: + role-to-assume: ${{ env.AWS_ROLE }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.3.6 + - name: Configure Kubeconfig run: aws eks update-kubeconfig --region us-east-1 --name core-k8s-cluster-$ENV @@ -56,10 +92,7 @@ jobs: run: | terraform -chdir=terraform-k8s-infrastructure plan 
-var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -target module.k8s_namespaces - -target module.k8s_infrastructure - -target module.k8s_microservice_routing + -var "cloudflare_email=${TF_VAR_cloudflare_email}" # - name: TF Plan Infrastructure # run: | diff --git a/terraform/main.tf b/terraform/main.tf index fa7947f..67d9f02 100644 --- a/terraform/main.tf +++ b/terraform/main.tf @@ -56,6 +56,7 @@ module "eks" { ebs_csi_addon_version = var.ebs_csi_addon_version kube_proxy_addon_version = var.kube_proxy_addon_version admin_role_arns = data.aws_iam_roles.admin_arn.arns + gha_role_arn = var.gha_role_arn subnet_ids = [ module.vpc.private_subnets[0].id, module.vpc.private_subnets[1].id, diff --git a/terraform/modules/eks/main.tf b/terraform/modules/eks/main.tf index c9d0d1a..8e0f701 100644 --- a/terraform/modules/eks/main.tf +++ b/terraform/modules/eks/main.tf @@ -286,3 +286,23 @@ resource "aws_eks_access_policy_association" "admin_policy" { aws_eks_access_entry.admin_role ] } + +resource "aws_eks_access_entry" "gha_role" { + cluster_name = aws_eks_cluster.eks_cluster.name + principal_arn = var.gha_role_arn + type = "STANDARD" +} + +resource "aws_eks_access_policy_association" "gha_policy" { + cluster_name = aws_eks_cluster.eks_cluster.name + policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy" + principal_arn = var.gha_role_arn + + access_scope { + type = "cluster" + } + + depends_on = [ + aws_eks_access_entry.gha_role + ] +} \ No newline at end of file diff --git a/terraform/modules/eks/variable.tf b/terraform/modules/eks/variable.tf index 5de88da..109db78 100644 --- a/terraform/modules/eks/variable.tf +++ b/terraform/modules/eks/variable.tf @@ -44,3 +44,8 @@ variable "admin_role_arns" { type = set(string) description = "ARN of the Role used for admin cluster access." 
} + +variable "gha_role_arn" { + type = string + description = "ARN of the Role used for Github Actions." +} diff --git a/terraform/variables.tf b/terraform/variables.tf index 6e2d9e4..48228a9 100644 --- a/terraform/variables.tf +++ b/terraform/variables.tf @@ -283,3 +283,8 @@ variable "email_recipients" { description = "List of email addresses to contact in case an alert fails" default = [] } + +variable "gha_role_arn" { + type = string + description = "ARN of the Role used for Github Actions." +} diff --git a/terraform/vars/terraform-dev.tfvars b/terraform/vars/terraform-dev.tfvars index 2a605ff..8aa3717 100644 --- a/terraform/vars/terraform-dev.tfvars +++ b/terraform/vars/terraform-dev.tfvars @@ -26,3 +26,4 @@ gateway_node_group_desired_size = 0 hibernate = false aq_bucket_cors_allowed_origin = "*" deploy_sparkpost_templates = false +gha_role_arn = "arn:aws:iam::842534099497:role/wri-api-dev-githubactions-role" \ No newline at end of file From fcb14d72fd49c371ca26a2c44feaff1cfb045b24 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 10 Feb 2026 17:16:37 -0500 Subject: [PATCH 20/47] Upload plan file as artifact. 
--- .github/workflows/terraform_build.yaml | 15 ++++++--- .github/workflows/terraform_plan.yaml | 44 ++++++++++++-------------- 2 files changed, 31 insertions(+), 28 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index ded2ff1..7d61538 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -27,17 +27,24 @@ jobs: TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} steps: - - uses: actions/checkout@v1 + - uses: actions/checkout@v4 + + - name: Setup terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.3.6 - name: TF Init - run: ./scripts/infra -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars + run: terraform -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars - name: TF Plan run: | - ./scripts/infra -chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ + terraform -chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" + -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ + -out tf.plan + - name: TF Apply run: | ./scripts/infra -chdir=terraform apply -auto-approve -var-file=vars/terraform-$ENV.tfvars \ diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 83f4469..0320093 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -1,4 +1,4 @@ -name: Plan terraform changes for base branch +name: Terraform Plan on: pull_request: @@ -39,7 +39,6 @@ jobs: terraform_version: 1.3.6 - name: TF Init EKS Cluster - #run: ./scripts/infra -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars run: terraform -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars - name: TF Plan EKS Cluster @@ -47,7 +46,16 @@ jobs: terraform -chdir=terraform plan 
-var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" + -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ + -out tf_eks_${{ github.event.pull_request.number }}.plan + + - name: Upload EKS Cluster Plan File + uses: actions/upload-artifact@v4 + with: + name: tf_eks_${{ github.event.pull_request.number }}.plan + path: "terraform/tf_eks_${{ github.event.pull_request.number }}.plan" + if-no-files-found: 'error' + overwrite: true plan_k8s_infra: runs-on: ubuntu-latest @@ -92,25 +100,13 @@ jobs: run: | terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" + -var "cloudflare_email=${TF_VAR_cloudflare_email}" | + -out tf_k8s_infra-${{ github.event.pull_request.nubmer }}.plan -# - name: TF Plan Infrastructure -# run: | -# terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -# -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -# -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -# -target module.k8s_infrastructure - -# - name: TF Plan Data Layer -# run: | -# terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -# -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -# -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -# -target module.k8s_data_layer - -# - name: TF Plan MS Routing -# run: | -# terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -# -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -# -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -# -target module.k8s_microservice_routing + - name: Upload K8s Infrastructure Plan File + uses: actions/upload-artifact@v4 + with: + name: tf_k8s_infra_${{ github.event.pull_request.number }}.plan 
+ path: "terraform-k8s-infrastructure/tf_k8s_infra_${{ github.event.pull_request.number }}.plan" + if-no-files-found: 'error' + overwrite: true From 02be86d2e0c58e483f486b7926e8622c64f19fb7 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 10 Feb 2026 18:16:01 -0500 Subject: [PATCH 21/47] Use plan artifact in apply. Mispelled number. Dash not underscore. Standardize dashes and underscores. --- .github/workflows/terraform_build.yaml | 134 +++++++++++++++++++++---- .github/workflows/terraform_plan.yaml | 10 +- .gitignore | 3 + 3 files changed, 120 insertions(+), 27 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index 7d61538..e8ea281 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -4,27 +4,33 @@ on: push: branches: [dev, staging] +permissions: + id-token: write + contents: read + pull-requests: read + actions: read + jobs: - build_dev: + build_eks_cluster: runs-on: ubuntu-latest env: - ENV: ${{ github.ref_name }} - AWS_ACCESS_KEY_ID: >- - ${{ github.ref_name == 'production' && secrets.aws_key_production || - github.ref_name == 'staging' && secrets.aws_key_staging || - secrets.aws_key_dev }} - AWS_SECRET_ACCESS_KEY: >- - ${{ github.ref_name == 'production' && secrets.aws_secret_production || - github.ref_name == 'staging' && secrets.aws_secret_staging || - secrets.aws_secret_dev }} - AWS_REGION: >- - ${{ github.ref_name == 'production' && secrets.aws_region_production || - github.ref_name == 'staging' && secrets.aws_region_staging || - secrets.aws_region_dev }} - TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} - TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} - TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} + ENV: ${{ github.ref_name }} + AWS_ACCESS_KEY_ID: >- + ${{ github.ref_name == 'production' && secrets.aws_key_production || + github.ref_name == 'staging' && secrets.aws_key_staging || + secrets.aws_key_dev }} + 
AWS_SECRET_ACCESS_KEY: >- + ${{ github.ref_name == 'production' && secrets.aws_secret_production || + github.ref_name == 'staging' && secrets.aws_secret_staging || + secrets.aws_secret_dev }} + AWS_REGION: >- + ${{ github.ref_name == 'production' && secrets.aws_region_production || + github.ref_name == 'staging' && secrets.aws_region_staging || + secrets.aws_region_dev }} + TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} + TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} + TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} steps: - uses: actions/checkout@v4 @@ -37,17 +43,101 @@ jobs: - name: TF Init run: terraform -chdir=terraform init -backend-config=vars/backend-$ENV.tfvars - - name: TF Plan + #- name: TF Plan + # run: | + # terraform -chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ + # -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ + # -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ + # -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ + # -out tf.plan + + - name: Get PR Number + uses: jwalton/gh-find-current-pr@master + id: findpr + with: + state: all + + - name: Download TF EKS Cluster Plan + uses: dawidd6/action-download-artifact@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + workflow: terraform_plan.yaml + pr: ${{ steps.findpr.outputs.pr }} + name: tf_eks-${{ steps.findpr.outputs.pr }}.plan + path: terraform/ + check_artifacts: true + + - name: TF Apply run: | - terraform -chdir=terraform plan -var-file=vars/terraform-$ENV.tfvars \ + terraform -chdir=terraform apply -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ - -out tf.plan + tf_eks-${{ steps.findpr.outputs.pr }}.plan + +build_k8s_infra: + runs-on: ubuntu-latest + needs: build_eks_cluster + + env: + ENV: ${{ github.ref_name }} + AWS_ACCESS_KEY_ID: >- + ${{ github.ref_name == 
'production' && secrets.aws_key_production || + github.ref_name == 'staging' && secrets.aws_key_staging || + secrets.aws_key_dev }} + AWS_SECRET_ACCESS_KEY: >- + ${{ github.ref_name == 'production' && secrets.aws_secret_production || + github.ref_name == 'staging' && secrets.aws_secret_staging || + secrets.aws_secret_dev }} + AWS_REGION: >- + ${{ github.ref_name == 'production' && secrets.aws_region_production || + github.ref_name == 'staging' && secrets.aws_region_staging || + secrets.aws_region_dev }} + TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} + TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} + TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@main + with: + role-to-assume: ${{ env.AWS_ROLE }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.3.6 + + - name: Configure Kubeconfig + run: aws eks update-kubeconfig --region us-east-1 --name core-k8s-cluster-$ENV + + - name: TF Init K8s Infra + run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars + + - name: Get PR Number + uses: jwalton/gh-find-current-pr@master + id: findpr + with: + state: all + + - name: Download TF k8s Infra Plan + uses: dawidd6/action-download-artifact@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + workflow: terraform_plan.yaml + pr: ${{ steps.findpr.outputs.pr }} + name: tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan + path: terraform-k8s-infrastructure/ + check_artifacts: true - name: TF Apply run: | - ./scripts/infra -chdir=terraform apply -auto-approve -var-file=vars/terraform-$ENV.tfvars \ + terraform -chdir=terraform_k8s_infrastructure apply -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var 
"cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" + -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ + tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 0320093..c4c6f71 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -47,12 +47,12 @@ jobs: -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ - -out tf_eks_${{ github.event.pull_request.number }}.plan + -out tf_eks-${{ github.event.pull_request.number }}.plan - name: Upload EKS Cluster Plan File uses: actions/upload-artifact@v4 with: - name: tf_eks_${{ github.event.pull_request.number }}.plan + name: tf_eks-${{ github.event.pull_request.number }}.plan path: "terraform/tf_eks_${{ github.event.pull_request.number }}.plan" if-no-files-found: 'error' overwrite: true @@ -100,13 +100,13 @@ jobs: run: | terraform -chdir=terraform-k8s-infrastructure plan -var-file=vars/terraform-$ENV.tfvars \ -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" | - -out tf_k8s_infra-${{ github.event.pull_request.nubmer }}.plan + -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ + -out tf_k8s_infra-${{ github.event.pull_request.number }}.plan - name: Upload K8s Infrastructure Plan File uses: actions/upload-artifact@v4 with: name: tf_k8s_infra_${{ github.event.pull_request.number }}.plan - path: "terraform-k8s-infrastructure/tf_k8s_infra_${{ github.event.pull_request.number }}.plan" + path: "terraform-k8s-infrastructure/tf_k8s_infra-${{ github.event.pull_request.number }}.plan" if-no-files-found: 'error' overwrite: true diff --git a/.gitignore b/.gitignore index 0e0857b..30ac4e7 100644 --- a/.gitignore +++ b/.gitignore @@ -53,3 +53,6 @@ 
terraform-k8s-infrastructure/vars/private.tfvars terraform.tfstate terraform.tfstate.backup + +# Snyk Security Extension - AI Rules (auto-generated) +.github/instructions/snyk_rules.instructions.md From 46f6685d6c8d2d886ab74184a0bce61e96623968 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 10:35:50 -0500 Subject: [PATCH 22/47] Missed a dash. --- .github/workflows/terraform_plan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index c4c6f71..6fd22f7 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -53,7 +53,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: tf_eks-${{ github.event.pull_request.number }}.plan - path: "terraform/tf_eks_${{ github.event.pull_request.number }}.plan" + path: "terraform/tf_eks-${{ github.event.pull_request.number }}.plan" if-no-files-found: 'error' overwrite: true From 34e06c70ab89bf296de37487074409da24845d3a Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 11:44:10 -0500 Subject: [PATCH 23/47] Fix indentation issue. 
--- .github/workflows/terraform_build.yaml | 130 ++++++++++++------------- 1 file changed, 65 insertions(+), 65 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index e8ea281..c6f96f6 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -75,69 +75,69 @@ jobs: -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ tf_eks-${{ steps.findpr.outputs.pr }}.plan -build_k8s_infra: - runs-on: ubuntu-latest - needs: build_eks_cluster - - env: - ENV: ${{ github.ref_name }} - AWS_ACCESS_KEY_ID: >- - ${{ github.ref_name == 'production' && secrets.aws_key_production || - github.ref_name == 'staging' && secrets.aws_key_staging || - secrets.aws_key_dev }} - AWS_SECRET_ACCESS_KEY: >- - ${{ github.ref_name == 'production' && secrets.aws_secret_production || - github.ref_name == 'staging' && secrets.aws_secret_staging || - secrets.aws_secret_dev }} - AWS_REGION: >- - ${{ github.ref_name == 'production' && secrets.aws_region_production || - github.ref_name == 'staging' && secrets.aws_region_staging || - secrets.aws_region_dev }} - TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} - TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} - TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@main - with: - role-to-assume: ${{ env.AWS_ROLE }} - aws-region: ${{ env.AWS_REGION }} - - - name: Setup terraform - uses: hashicorp/setup-terraform@v3 - with: - terraform_version: 1.3.6 - - - name: Configure Kubeconfig - run: aws eks update-kubeconfig --region us-east-1 --name core-k8s-cluster-$ENV - - - name: TF Init K8s Infra - run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars - - - name: Get PR Number - uses: jwalton/gh-find-current-pr@master - id: findpr - with: - state: all - 
- - name: Download TF k8s Infra Plan - uses: dawidd6/action-download-artifact@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - workflow: terraform_plan.yaml - pr: ${{ steps.findpr.outputs.pr }} - name: tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan - path: terraform-k8s-infrastructure/ - check_artifacts: true + build_k8s_infra: + runs-on: ubuntu-latest + needs: build_eks_cluster - - name: TF Apply - run: | - terraform -chdir=terraform_k8s_infrastructure apply -var-file=vars/terraform-$ENV.tfvars \ - -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ - tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan + env: + ENV: ${{ github.ref_name }} + AWS_ACCESS_KEY_ID: >- + ${{ github.ref_name == 'production' && secrets.aws_key_production || + github.ref_name == 'staging' && secrets.aws_key_staging || + secrets.aws_key_dev }} + AWS_SECRET_ACCESS_KEY: >- + ${{ github.ref_name == 'production' && secrets.aws_secret_production || + github.ref_name == 'staging' && secrets.aws_secret_staging || + secrets.aws_secret_dev }} + AWS_REGION: >- + ${{ github.ref_name == 'production' && secrets.aws_region_production || + github.ref_name == 'staging' && secrets.aws_region_staging || + secrets.aws_region_dev }} + TF_VAR_cloudflare_api_key: ${{ secrets.cloudflare_api_key }} + TF_VAR_cloudflare_email: ${{ secrets.cloudflare_email }} + TF_VAR_sparkpost_api_key: ${{ secrets.sparkpost_api_key }} + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@main + with: + role-to-assume: ${{ env.AWS_ROLE }} + aws-region: ${{ env.AWS_REGION }} + + - name: Setup terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_version: 1.3.6 + + - name: Configure Kubeconfig + run: aws eks update-kubeconfig --region us-east-1 --name core-k8s-cluster-$ENV + + - name: TF Init K8s Infra + 
run: terraform -chdir=terraform-k8s-infrastructure init -backend-config=vars/backend-$ENV.tfvars + + - name: Get PR Number + uses: jwalton/gh-find-current-pr@master + id: findpr + with: + state: all + + - name: Download TF k8s Infra Plan + uses: dawidd6/action-download-artifact@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + workflow: terraform_plan.yaml + pr: ${{ steps.findpr.outputs.pr }} + name: tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan + path: terraform-k8s-infrastructure/ + check_artifacts: true + + - name: TF Apply + run: | + terraform -chdir=terraform_k8s_infrastructure apply -var-file=vars/terraform-$ENV.tfvars \ + -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ + -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ + -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ + tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan From b54c5ca71bcd55800c08ea2252de8ac923e1eb05 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 11:58:15 -0500 Subject: [PATCH 24/47] Get closed PR which should be the one that was merged. --- .github/workflows/terraform_build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index c6f96f6..3d3c107 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -55,7 +55,7 @@ jobs: uses: jwalton/gh-find-current-pr@master id: findpr with: - state: all + state: closed - name: Download TF EKS Cluster Plan uses: dawidd6/action-download-artifact@v3 @@ -122,7 +122,7 @@ jobs: uses: jwalton/gh-find-current-pr@master id: findpr with: - state: all + state: closed - name: Download TF k8s Infra Plan uses: dawidd6/action-download-artifact@v3 From afd298b783d1469c56282982ad2bd15feefdb22a Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 12:09:50 -0500 Subject: [PATCH 25/47] Don't pass vars when using plan file. 
--- .github/workflows/terraform_build.yaml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index 3d3c107..6b120b3 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -69,11 +69,7 @@ jobs: - name: TF Apply run: | - terraform -chdir=terraform apply -var-file=vars/terraform-$ENV.tfvars \ - -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ - tf_eks-${{ steps.findpr.outputs.pr }}.plan + terraform -chdir=terraform apply tf_eks-${{ steps.findpr.outputs.pr }}.plan build_k8s_infra: runs-on: ubuntu-latest @@ -136,8 +132,5 @@ jobs: - name: TF Apply run: | - terraform -chdir=terraform_k8s_infrastructure apply -var-file=vars/terraform-$ENV.tfvars \ - -var "cloudflare_api_key=${TF_VAR_cloudflare_api_key}" \ - -var "cloudflare_email=${TF_VAR_cloudflare_email}" \ - -var "sparkpost_api_key=${TF_VAR_sparkpost_api_key}" \ + terraform -chdir=terraform_k8s_infrastructure apply \ tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan From 17060a4c96681350674eac108dd66bb18d227063 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 12:45:21 -0500 Subject: [PATCH 26/47] Missed a dash. 
--- .github/workflows/terraform_plan.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index 6fd22f7..adcaec8 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -106,7 +106,7 @@ jobs: - name: Upload K8s Infrastructure Plan File uses: actions/upload-artifact@v4 with: - name: tf_k8s_infra_${{ github.event.pull_request.number }}.plan + name: tf_k8s_infra-${{ github.event.pull_request.number }}.plan path: "terraform-k8s-infrastructure/tf_k8s_infra-${{ github.event.pull_request.number }}.plan" if-no-files-found: 'error' overwrite: true From 964c8191793486744363523d12f3f7d080dfd3eb Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 13:05:01 -0500 Subject: [PATCH 27/47] Fixes dashes in path... --- .github/workflows/terraform_build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index 6b120b3..b9ff670 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -132,5 +132,5 @@ jobs: - name: TF Apply run: | - terraform -chdir=terraform_k8s_infrastructure apply \ + terraform -chdir=terraform-k8s-infrastructure apply \ tf_k8s_infra-${{ steps.findpr.outputs.pr }}.plan From 221ff03947b6a80a5f4f7486428237b6d0eddf11 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 14:00:40 -0500 Subject: [PATCH 28/47] Fix helm chart for redis. 
--- terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index 5c9dc00..ec37495 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -1,6 +1,7 @@ resource "helm_release" "redis" { name = "redis" - chart = "bitnami/redis" + repository = "https://charts.bitnami.com/bitnami" + chart = "redis" namespace = "core" version = "16.13.2" From ea4237187b4c10a8442e3c318cd183b1e6bc5ee5 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 14:35:54 -0500 Subject: [PATCH 29/47] Actually use proper AWS role for apply. --- .github/workflows/terraform_build.yaml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index b9ff670..e13e0fa 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -77,14 +77,10 @@ jobs: env: ENV: ${{ github.ref_name }} - AWS_ACCESS_KEY_ID: >- - ${{ github.ref_name == 'production' && secrets.aws_key_production || - github.ref_name == 'staging' && secrets.aws_key_staging || - secrets.aws_key_dev }} - AWS_SECRET_ACCESS_KEY: >- - ${{ github.ref_name == 'production' && secrets.aws_secret_production || - github.ref_name == 'staging' && secrets.aws_secret_staging || - secrets.aws_secret_dev }} + AWS_ROLE: >- + ${{ github.base_ref == 'production' && 'TBD' || + github.base_ref == 'staging' && 'TBD' || + 'arn:aws:iam::842534099497:role/wri-api-dev-githubactions-role' }} AWS_REGION: >- ${{ github.ref_name == 'production' && secrets.aws_region_production || github.ref_name == 'staging' && secrets.aws_region_staging || From 8bb8143acd31bd69072b6b0b24adf1378c773906 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 
Feb 2026 14:53:08 -0500 Subject: [PATCH 30/47] Add value back in to redis chart values. --- .../modules/k8s_data_layer/redis/redis.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml index 75180ab..6549f7e 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml @@ -232,8 +232,8 @@ master: ## ## Can be used to specify command line arguments, for example: ## - # command: - # - "/run.sh" + command: + - "/run.sh" ## Additional Redis configuration for the master nodes ## ref: https://redis.io/topics/config ## From 7947e9b6df1226835ed1376c85bdd2d44a44073c Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Wed, 11 Feb 2026 15:27:00 -0500 Subject: [PATCH 31/47] Temporarily allow force update on Redis chart. --- terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf | 4 ++++ .../modules/k8s_data_layer/redis/redis.yaml | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index ec37495..094aebf 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -8,6 +8,10 @@ resource "helm_release" "redis" { values = [ file("${path.module}/redis/redis.yaml") ] + + // Enable force update and pod recreation + force_update = true + recreate_pods = true } diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml index 6549f7e..75180ab 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml @@ -232,8 +232,8 @@ 
master: ## ## Can be used to specify command line arguments, for example: ## - command: - - "/run.sh" + # command: + # - "/run.sh" ## Additional Redis configuration for the master nodes ## ref: https://redis.io/topics/config ## From 9f528c91ac2ae2d644909b75d13238c8daa4d0b4 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 12 Feb 2026 14:49:36 -0500 Subject: [PATCH 32/47] Updates image and charts. --- .../modules/k8s_data_layer/postgresql/postgresql.yaml | 2 +- .../modules/k8s_data_layer/rabbitmq.tf | 3 ++- .../modules/k8s_data_layer/rabbitmq/rabbitmq.yaml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml index 32a2cd1..94bc867 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml @@ -14,7 +14,7 @@ global: ## image: registry: docker.io - repository: bitnami/postgresql + repository: bitnamilegacy/postgresql tag: 9.6-debian-10 ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index cae9145..5cb57f7 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -6,7 +6,8 @@ data "kubernetes_secret" "rabbitmq_core" { } resource "helm_release" "rabbitmq" { name = "rabbitmq" - chart = "bitnami/rabbitmq" + repository = "https://charts.bitnami.com/bitnami" + chart = "rabbitmq" namespace = "core" version = "6.18.2" diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq/rabbitmq.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq/rabbitmq.yaml index 
a71ee72..ebe7793 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq/rabbitmq.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq/rabbitmq.yaml @@ -13,7 +13,7 @@ global: ## image: registry: docker.io - repository: bitnami/rabbitmq + repository: bitnamilegacy/rabbitmq tag: 3.7-debian-9 ## set to true if you would like to see extra information on logs From 88c8c72dd364b426005261d19891cee4c67d95c5 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 12 Feb 2026 14:51:57 -0500 Subject: [PATCH 33/47] Update postgresql chart repo. --- .../modules/k8s_data_layer/postgresql.tf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index a70a14d..708f866 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -6,7 +6,8 @@ data "kubernetes_secret" "postgresql_core" { } resource "helm_release" "postgresql" { name = "postgresql" - chart = "bitnami/postgresql" + repository = "https://charts.bitnami.com/bitnami" + chart = "postgresql" namespace = "core" version = "8.6.4" From bf773d75909314f18ac38f9dfce9967df56443ca Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 12 Feb 2026 18:25:42 -0500 Subject: [PATCH 34/47] Updates postgresql helm chart and turns on GHA concurrency group. 
--- .github/workflows/terraform_build.yaml | 4 + .github/workflows/terraform_plan.yaml | 4 + .../modules/k8s_data_layer/postgresql.tf | 2 +- .../k8s_data_layer/postgresql/postgresql.yaml | 2237 ++++++++++++++--- 4 files changed, 1864 insertions(+), 383 deletions(-) diff --git a/.github/workflows/terraform_build.yaml b/.github/workflows/terraform_build.yaml index e13e0fa..2c7ed9e 100644 --- a/.github/workflows/terraform_build.yaml +++ b/.github/workflows/terraform_build.yaml @@ -1,5 +1,9 @@ name: Run tests and apply terraform changes for current branch +concurrency: + group: deploy-terraform + cancel-in-progress: false + on: push: branches: [dev, staging] diff --git a/.github/workflows/terraform_plan.yaml b/.github/workflows/terraform_plan.yaml index adcaec8..fb859ec 100644 --- a/.github/workflows/terraform_plan.yaml +++ b/.github/workflows/terraform_plan.yaml @@ -1,5 +1,9 @@ name: Terraform Plan +concurrency: + group: deploy-terraform + cancel-in-progress: false + on: pull_request: branches: [dev, staging] diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index 708f866..cefa1d2 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -9,7 +9,7 @@ resource "helm_release" "postgresql" { repository = "https://charts.bitnami.com/bitnami" chart = "postgresql" namespace = "core" - version = "8.6.4" + version = "18.3.0" values = [ file("${path.module}/postgresql/postgresql.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml index 94bc867..2c3848f 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml @@ -1,299 +1,590 @@ -## Global 
Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Please, note that this will override the parameters, including dependencies, configured to use the global value ## global: - postgresql: { } -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass + ## @param global.imageRegistry Global Docker image registry + ## + imageRegistry: "" + ## @param global.imagePullSecrets Global Docker registry secret names as an array + ## e.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + ## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead + ## + defaultStorageClass: "" + storageClass: "" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + #postgresql: + ## @param global.postgresql.fullnameOverride Full chart name (overrides `fullnameOverride`) + ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) + ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) + ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) + ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) + ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). 
+ ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. + ## + #fullnameOverride: "" + + #auth: + # postgresPassword: "" + # username: "" + # password: "" + # database: "" + # existingSecret: "" + # secretKeys: + # adminPasswordKey: "" + # userPasswordKey: "" + # replicationPasswordKey: "" + ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) + ## + #service: + # ports: + # postgresql: "" + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param secretAnnotations Add annotations to the secrets +## +secretAnnotations: {} +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity +## @section PostgreSQL common parameters +## ## Bitnami PostgreSQL image version ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry +## @param image.repository [default: REPOSITORY_NAME/postgresql] PostgreSQL image repository +## @skip image.tag PostgreSQL image tag (immutable tags are recommended) +## @param image.digest PostgreSQL image digest 
in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy PostgreSQL image pull policy +## @param image.pullSecrets Specify image pull secrets +## @param image.debug Specify if debug values should be set ## image: registry: docker.io - repository: bitnamilegacy/postgresql + repository: bitnami/postgresql tag: 9.6-debian-10 + digest: "" ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName - + pullSecrets: [] ## Set to true if you would like to see extra information on logs - ## It turns BASH and NAMI debugging in minideb - ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## debug: false - -## String to partially override postgresql.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override postgresql.fullname template +## Authentication parameters +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run +## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run ## -# fullnameOverride: - -## -## Init containers parameters: -## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup -## -volumePermissions: - enabled: false - -## Use an alternate 
scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +auth: + ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. Otherwise, remote access will be blocked for this user + ## + enablePostgresUser: true + ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided + ## + # postgresPassword: "" + ## @param auth.username Name for a custom user to create + ## + # username: "" + ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided + ## + # password: "" + ## @param auth.database Name for a custom database to create + ## + # database: "" + ## @param auth.replicationUsername Name of the replication user + ## + # replicationUsername: repl_user + ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided + ## + # replicationPassword: "" + ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. + ## + existingSecret: "postgresql" + ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. + ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. 
+ ## + secretKeys: + adminPasswordKey: postgres-password + userPasswordKey: password + replicationPasswordKey: replication-password + ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable + ## + usePasswordFiles: true +## @param architecture PostgreSQL architecture (`standalone` or `replication`) ## -# schedulerName: - -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +architecture: standalone +## Replication configuration +## Ignored if `architecture` is `standalone` ## -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Pod Service Account -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -serviceAccount: - enabled: false - ## Name of an already existing service account. Setting this value disables the automatic service account creation. - # name: - replication: - enabled: false - user: repl_user - password: repl_password - slaveReplicas: 0 - ## Set synchronous commit mode: on, off, remote_apply, remote_write and local - ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL - synchronousCommit: "on" - ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication - ## NOTE: It cannot be > slaveReplicas - numSynchronousReplicas: 1 - ## Replication Cluster application name. Useful for defining multiple replication policies + ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` + ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. 
+ ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT + ## + synchronousCommit: "off" + numSynchronousReplicas: 0 + ## @param replication.applicationName Cluster application name. Useful for advanced replication settings + ## applicationName: my_application - -## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) -# postgresqlPostgresPassword: - -## PostgreSQL user (has superuser privileges if username is `postgres`) -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run -postgresqlUsername: postgres - -## PostgreSQL password -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## @param containerPorts.postgresql PostgreSQL container port ## -# postgresqlPassword: - -## PostgreSQL password using existing secret -existingSecret: postgresql - -## Mount PostgreSQL secret as a file instead of passing environment variable -# usePasswordFile: false - -## Create a database -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run -## -# postgresqlDatabase: - -## PostgreSQL data dir -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -postgresqlDataDir: /bitnami/postgresql/data - -## An array to add extra environment variables -## For example: -## extraEnv: -## - name: FOO -## value: "bar" -## -# extraEnv: -extraEnv: [ ] - -## Name of a ConfigMap containing extra env vars -## -# extraEnvVarsCM: - -## Specify extra initdb args -## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbArgs: - -## Specify a custom location for the PostgreSQL transaction log -## ref: 
https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md -## -# postgresqlInitdbWalDir: - -## PostgreSQL configuration -## Specify runtime configuration parameters as a dict, using camelCase, e.g. -## {"sharedBuffers": "500MB"} -## Alternatively, you can put your postgresql.conf under the files/ directory -## ref: https://www.postgresql.org/docs/current/static/runtime-config.html -## -# postgresqlConfiguration: - -## PostgreSQL extended configuration -## As above, but _appended_ to the main configuration -## Alternatively, you can put your *.conf under the files/conf.d/ directory -## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +containerPorts: + postgresql: 5432 +## Audit settings +## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing +## @param audit.logHostname Log client hostnames +## @param audit.logConnections Add client log-in operations to the log file +## @param audit.logDisconnections Add client log-outs operations to the log file +## @param audit.pgAuditLog Add operations to log using the pgAudit extension +## @param audit.pgAuditLogCatalog Log catalog using pgAudit +## @param audit.clientMinMessages Message log level to share with the user +## @param audit.logLinePrefix Template for log line prefix (default if not set) +## @param audit.logTimezone Timezone for the log timestamps ## -# postgresqlExtendedConf: - -## PostgreSQL client authentication configuration -## Specify content for pg_hba.conf -## Default: do not create pg_hba.conf -## Alternatively, you can put your pg_hba.conf under the files/ directory -# pgHbaConfiguration: |- -# local all all trust -# host all all localhost trust -# host mydatabase mysuser 192.168.0.0/24 md5 - -## ConfigMap with PostgreSQL configuration -## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration -# configurationConfigMap: - -## ConfigMap with PostgreSQL extended 
configuration -# extendedConfConfigMap: - -## initdb scripts -## Specify dictionary of scripts to be run at first boot -## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory -## -# initdbScripts: -# my_init_script.sh: | -# #!/bin/sh -# echo "Do something." - -## Specify the PostgreSQL username and password to execute the initdb scripts -# initdbUser: -# initdbPassword: - -## ConfigMap with scripts to be run at first boot -## NOTE: This will override initdbScripts -# initdbScriptsConfigMap: - -## Secret with scripts to be run at first boot (in case it contains sensitive information) -## NOTE: This can work along initdbScripts or initdbScriptsConfigMap -# initdbScriptsSecret: - -## Optional duration in seconds the pod needs to terminate gracefully. -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods -## -# terminationGracePeriodSeconds: 30 - +audit: + logHostname: false + logConnections: false + logDisconnections: false + pgAuditLog: "" + pgAuditLogCatalog: "off" + clientMinMessages: error + logLinePrefix: "" + logTimezone: "" ## LDAP configuration +## @param ldap.enabled Enable LDAP support +## @param ldap.server IP address or name of the LDAP server. 
+## @param ldap.port Port number on the LDAP server to connect to +## @param ldap.prefix String to prepend to the user name when forming the DN to bind +## @param ldap.suffix String to append to the user name when forming the DN to bind +## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead +## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead +## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead +## @param ldap.basedn Root DN to begin the search for the user in +## @param ldap.binddn DN of user to bind to LDAP +## @param ldap.bindpw Password for the user to bind to LDAP +## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead +## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead +## @param ldap.searchAttribute Attribute to match against the user name in the search +## @param ldap.searchFilter The search filter to use when doing search+bind authentication +## @param ldap.scheme Set to `ldaps` to use LDAPS +## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead +## @param ldap.tls.enabled Se to true to enable TLS encryption ## ldap: enabled: false - url: "" server: "" port: "" prefix: "" suffix: "" - baseDN: "" - bindDN: "" - bind_password: - search_attr: "" - search_filter: "" + basedn: "" + binddn: "" + bindpw: "" + searchAttribute: "" + searchFilter: "" scheme: "" - tls: false - -## PostgreSQL service configuration -service: - ## PosgresSQL service type - type: ClusterIP - # clusterIP: None - port: 5432 - - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - # nodePort: - - ## Provide any additional annotations which may be required. 
- ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - annotations: { } - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - - ## Load Balancer sources - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + tls: + enabled: false + ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. + ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html ## - # loadBalancerSourceRanges: - # - 10.10.10.0/24 - -## Start master and slave(s) pod(s) without limitations on shm memory. -## By default docker and containerd (and possibly other container runtimes) -## limit `/dev/shm` to `64M` (see e.g. the -## [docker issue](https://github.com/docker-library/postgres/issues/416) and the -## [containerd issue](https://github.com/containerd/containerd/issues/3654), -## which could be not enough if PostgreSQL uses parallel workers heavily. -## If this option is present and value is `true`, -## to the target database pod will be mounted a new tmpfs volume to remove -## this limitation. + uri: "" +## @param postgresqlDataDir PostgreSQL data dir folder +## +postgresqlDataDir: /bitnami/postgresql/data +## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) +## +postgresqlSharedPreloadLibraries: "pgaudit" +## Start PostgreSQL pod(s) without limitations on shm memory. 
+## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` +## ref: https://github.com/docker-library/postgres/issues/416 +## ref: https://github.com/containerd/containerd/issues/3654 +## shmVolume: + ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) + ## enabled: true - chmod: + ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs + ## Note: the size of the tmpfs counts against container's memory limit + ## e.g: + ## sizeLimit: 1Gi + ## + sizeLimit: "" +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + ## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's + ## + preferServerCiphers: true + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + ## @param tls.crlFilename File containing a Certificate Revocation List + ## + crlFilename: "" +## @section PostgreSQL Primary parameters +## +primary: + ## @param primary.name Name of the primary database (eg primary, master, leader, ...) 
+ ## + name: primary + ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap + ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html + ## + # configuration: "" + ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration + ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html + ## e.g:# + ## pgHbaConfiguration: |- + ## local all all trust + ## host all all localhost trust + ## host mydatabase mysuser 192.168.0.0/24 md5 + ## + # pgHbaConfiguration: "" + ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration + ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored + ## + # existingConfigmap: "" + ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + # extendedConfiguration: "" + ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration + ## NOTE: `primary.extendedConfiguration` will be ignored + ## + # existingExtendedConfigmap: "" + ## Initdb configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments + ## + #initdb: + ## @param primary.initdb.args PostgreSQL initdb extra arguments + ## + #args: "" + ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log + ## + #postgresqlWalDir: "" + ## @param primary.initdb.scripts Dictionary of initdb scripts + ## Specify dictionary of scripts to be run at first boot + ## e.g: + ## scripts: + ## my_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." 
+ ## + #scripts: {} + ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot + ## NOTE: This will override `primary.initdb.scripts` + ## + #scriptsConfigMap: "" + ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) + ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` + ## + #scriptsSecret: "" + ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts + ## + #user: "" + ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts + ## + #password: "" + ## Pre-init configuration + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql/#on-container-start + #preInitDb: + ## @param primary.preInitDb.scripts Dictionary of pre-init scripts + ## Specify dictionary of shell scripts to be run before db boot + ## e.g: + ## scripts: + ## my_pre_init_script.sh: | + ## #!/bin/sh + ## echo "Do something." + #scripts: {} + ## @param primary.preInitDb.scriptsConfigMap ConfigMap with pre-init scripts to be run + ## NOTE: This will override `primary.preInitDb.scripts` + #scriptsConfigMap: "" + ## @param primary.preInitDb.scriptsSecret Secret with pre-init scripts to be run + ## NOTE: This can work along `primary.preInitDb.scripts` or `primary.preInitDb.scriptsConfigMap` + #scriptsSecret: "" + ## Configure current cluster's primary server to be the standby server in other cluster. + ## This will allow cross cluster replication and provide cross cluster high availability. + ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. 
+ ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not + ## @param primary.standby.primaryHost The Host of replication primary in the other cluster + ## @param primary.standby.primaryPort The Port of replication primary in the other cluster + ## + standby: + enabled: false + primaryHost: "" + primaryPort: "" + ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsCM: "" + ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes + ## + extraEnvVarsSecret: "" + ## @param primary.command Override default container command (useful when using custom images) + ## + command: [] + ## @param primary.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers + ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: enabled: true - -## PostgreSQL data Persistent Volume Storage Class -## If defined, storageClassName: -## If set to "-", 
storageClassName: "", which disables dynamic provisioning -## If undefined (the default) or set to null, no storageClassName spec is -## set, choosing the default provisioner. (gp2 on AWS, standard on -## GKE, AWS & OpenStack) -## -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers + ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe ## - # existingClaim: - - ## The path the volume will be mounted at, useful when using different - ## PostgreSQL images. 
+ readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers + ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe + ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param primary.startupProbe.successThreshold Success threshold for startupProbe ## - mountPath: /bitnami/postgresql - - ## The subdirectory of the volume to mount to, useful in dev environments - ## and one PV for multiple services. + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one ## - subPath: "" - - storageClass: "gp2" - accessModes: - - ReadWriteOnce - size: 75Gi - annotations: { } - -## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies -updateStrategy: - type: RollingUpdate + customLivenessProbe: {} + ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param primary.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL Primary resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param primary.resourcesPreset Set container resources 
 according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + #resourcesPreset: "nano" + ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 2 + memory: 8Gi -## -## PostgreSQL Master parameters -## -master: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.podSecurityContext.enabled Enable security context + ## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: false + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param primary.containerSecurityContext.enabled Enabled containers' Security Context + ## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param primary.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param 
primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param primary.containerSecurityContext.privileged Set container's Security Context privileged + ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: false + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param primary.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param primary.hostAliases PostgreSQL primary pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param primary.hostNetwork Specify if host network should be enabled for PostgreSQL pod (postgresql primary) + ## + hostNetwork: false + ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) + ## + labels: {} + ## @param primary.annotations Annotations for PostgreSQL primary pods + ## + annotations: {} + ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) + ## + podLabels: {} + ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) + ## + podAnnotations: {} + ## @param 
primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL Primary node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + #nodeAffinityPreset: + ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + #type: "" + ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + #key: "" + ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + #values: [] + ## @param primary.affinity Affinity for PostgreSQL primary pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption - nodeSelector: { } + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -303,161 +594,1209 @@ master: operator: In values: - core - tolerations: [ ] - labels: { } - annotations: { } - podLabels: { } - podAnnotations: { } + ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) + ## priorityClassName: "" - ## Additional PostgreSQL Master Volume mounts + ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## - extraVolumeMounts: [ ] - ## Additional PostgreSQL Master Volumes + schedulerName: "" + ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods ## - extraVolumes: [ ] - -## -## PostgreSQL Slave parameters + terminationGracePeriodSeconds: "" + ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type + ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) + ## + extraVolumeMounts: [] + ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) + ## + extraVolumes: [] + ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + initContainers: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param primary.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param 
primary.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) + ## + extraPodSpec: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param primary.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param primary.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param primary.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param primary.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param primary.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param primary.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param primary.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PostgreSQL Primary service configuration + ## + service: + ## @param primary.service.enabled Enable/disable the PostgreSQL primary service + ## + enabled: true + ## @param primary.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param primary.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param primary.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param primary.service.labels Map of labels to add to the primary service + ## + labels: {} + ## @param primary.service.annotations Annotations for PostgreSQL primary service + 
## + annotations: {} + ## @param primary.service.loadBalancerClass Load balancer class if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param primary.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service + ## + extraPorts: [] + ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service + ## + annotations: {} + ## PostgreSQL Primary persistence 
configuration + ## + persistence: + ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC + ## + enabled: true + ## @param primary.persistence.volumeName Name to assign the volume + ## + volumeName: "data" + ## @param primary.persistence.existingClaim Name of an existing PVC to use + ## + #existingClaim: "" + ## @param primary.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param primary.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + #subPath: "" + ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "gp2" + ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 75Gi + ## @param primary.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param primary.persistence.labels Labels for the PVC + ## + labels: {} + ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param primary.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## PostgreSQL Primary Persistent Volume Claim Retention Policy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + persistentVolumeClaimRetentionPolicy: + ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset + ## + enabled: true + ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) ## -slave: - ## Node, affinity, tolerations, and priorityclass settings for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +readReplicas: + ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) 
+ ## + name: read + ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas + ## + replicaCount: 1 + ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf + ## + extendedConfiguration: "" + ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsCM: "" + ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes + ## + extraEnvVarsSecret: "" + ## @param readReplicas.command Override default container command (useful when using custom images) + ## + command: [] + ## @param readReplicas.args Override default container args (useful when using custom images) + ## + args: [] + ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers + ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe + ## + 
livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers + ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers + ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe + ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param readReplicas.lifecycleHooks for the 
PostgreSQL read only container to automate configuration before or after startup + ## + lifecycleHooks: {} + ## PostgreSQL read only resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param readReplicas.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + # resourcesPreset: "nano" + ## @param readReplicas.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: + requests: + cpu: 250m + memory: 256Mi + limits: + cpu: 2 + memory: 8Gi + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.podSecurityContext.enabled Enable security context + ## @param readReplicas.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param readReplicas.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param readReplicas.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod + ## + podSecurityContext: + enabled: false + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## @param readReplicas.containerSecurityContext.enabled Enabled containers' Security Context + ## @param readReplicas.containerSecurityContext.seLinuxOptions 
[object,nullable] Set SELinux options in container + ## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param readReplicas.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged + ## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: false + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param readReplicas.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) + ## + hostNetwork: false + ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) + ## + hostIPC: false + ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) + ## + labels: {} + ## @param 
readReplicas.annotations Annotations for PostgreSQL read only pods + ## + annotations: {} + ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) + ## + podLabels: {} + ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) + ## + podAnnotations: {} + ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## PostgreSQL read only node affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption - nodeSelector: { } - affinity: { } - tolerations: [ ] - labels: { } - annotations: { } - podLabels: { } - podAnnotations: { } + ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) + ## priorityClassName: "" - ## Additional PostgreSQL Slave Volume mounts + ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## - extraVolumeMounts: [ ] - ## Additional PostgreSQL Slave Volumes + schedulerName: "" + ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods ## - extraVolumes: [ ] - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - requests: - memory: 256Mi - cpu: 250m - limits: - cpu: 2 - memory: 8Gi - -networkPolicy: - ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + terminationGracePeriodSeconds: "" + ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type + ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) + ## + extraVolumeMounts: [] + ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) + ## + extraVolumes: [] + ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) + ## Example + ## + ## initContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + 
initContainers: [] + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param readReplicas.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param readReplicas.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param readReplicas.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty. ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" + ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) + ## + extraPodSpec: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param readReplicas.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param readReplicas.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param readReplicas.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param readReplicas.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param readReplicas.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param readReplicas.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param readReplicas.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## PostgreSQL read only service configuration + ## + service: + ## @param readReplicas.service.type Kubernetes Service type + ## + type: ClusterIP + ## @param readReplicas.service.ports.postgresql PostgreSQL service port + ## + ports: + postgresql: 5432 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + postgresql: "" + ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services + ## e.g: + ## clusterIP: None + ## + clusterIP: "" + ## @param readReplicas.service.labels Map of labels to add to the read service + ## + labels: {} + ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service + ## + annotations: {} + ## @param 
readReplicas.service.loadBalancerClass Load balancer class if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + ## + loadBalancerClass: "" + ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` + ## Set the LoadBalancer service type to internal only + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service + ## + extraPorts: [] + ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service + ## + annotations: {} + ## PostgreSQL read only 
persistence configuration + ## + persistence: + ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC + ## + enabled: true + ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param readReplicas.persistence.mountPath The path the volume will be mounted at + ## Note: useful when using custom PostgreSQL images + ## + mountPath: /bitnami/postgresql + ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to + ## Useful in dev environments and one PV for multiple services + ## + subPath: "" + ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume + ## + accessModes: + - ReadWriteOnce + ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume + ## + size: 8Gi + ## @param readReplicas.persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param readReplicas.persistence.labels Labels for the PVC + ## + labels: {} + ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param readReplicas.persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## PostgreSQL Read only Persistent Volume Claim Retention Policy + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + persistentVolumeClaimRetentionPolicy: + ## @param 
readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset + ## + enabled: false + ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced + ## + whenScaled: Retain + ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted + ## + whenDeleted: Retain +## @section Backup parameters +## This section implements a trivial logical dump cronjob of the database. +## This only comes with the consistency guarantees of the dump program. +## This is not a snapshot based roll forward/backward recovery backup. +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ +backup: + ## @param backup.enabled Enable the logical dump of the database "regularly" enabled: false + cronjob: + ## @param backup.cronjob.schedule Set the cronjob parameter schedule + schedule: "@daily" + ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone + timeZone: "" + ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy + concurrencyPolicy: Allow + ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit + failedJobsHistoryLimit: 1 + ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit + successfulJobsHistoryLimit: 3 + ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds + startingDeadlineSeconds: "" + ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished + ttlSecondsAfterFinished: "" + ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy + restartPolicy: OnFailure + ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup + ## @param 
backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## backup container's Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + 
capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param backup.cronjob.command Set backup container's command to run + command: + - /bin/bash + - -c + - PGPASSWORD="${PGPASSWORD:-$(< "$PGPASSWORD_FILE")}" pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file="${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump" + ## @param backup.cronjob.labels Set the cronjob labels + labels: {} + ## @param backup.cronjob.annotations Set the cronjob annotations + annotations: {} + ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/ + ## + nodeSelector: {} + ## @param backup.cronjob.tolerations Tolerations for PostgreSQL backup CronJob pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## backup cronjob container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory + ## Example: + resources: {} + ## resources: + ## requests: + ## cpu: 1 + ## memory: 512Mi + ## limits: + ## cpu: 2 + ## memory: 1024Mi + networkPolicy: + ## @param backup.cronjob.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + storage: + ## @param backup.cronjob.storage.enabled Enable using a `PersistentVolumeClaim` as backup data volume + ## + enabled: true + ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: "" + ## @param backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted + ## + resourcePolicy: "" + ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param backup.cronjob.storage.accessModes PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume + ## + size: 8Gi + ## @param backup.cronjob.storage.annotations PVC annotations + ## + annotations: {} + ## @param backup.cronjob.storage.mountPath Path to mount the volume at + ## + mountPath: /backup/pgdump + ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at + ## and one PV for multiple services. 
+ ## + subPath: "" + ## Fine tuning for volumeClaimTemplates + ## + volumeClaimTemplates: + ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) + ## A label query over volumes to consider for binding (e.g. when using local volumes) + ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details + ## + selector: {} + ## @param backup.cronjob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the backup container + ## + extraVolumeMounts: [] + ## @param backup.cronjob.extraVolumes Optionally specify extra list of additional volumes for the backup container + ## + extraVolumes: [] - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port PostgreSQL is listening - ## on. When true, PostgreSQL will accept connections from any source - ## (with the correct destination port). +## @section Password update job +## +passwordUpdateJob: + ## @param passwordUpdateJob.enabled Enable password update job ## - allowExternal: true - - ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the DB. - ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. 
- ## - # explicitNamespacesSelector: - # matchLabels: - # role: frontend - # matchExpressions: - # - {key: role, operator: In, values: [frontend]} + enabled: false + ## @param passwordUpdateJob.backoffLimit set backoff limit of the job + ## + backoffLimit: 10 + ## @param passwordUpdateJob.command Override default container command on mysql Primary container(s) (useful when using custom images) + ## + command: [] + ## @param passwordUpdateJob.args Override default container args on mysql Primary container(s) (useful when using custom images) + ## + args: [] + ## @param passwordUpdateJob.extraCommands Extra commands to pass to the generation job + ## + extraCommands: "" + ## @param passwordUpdateJob.previousPasswords.postgresPassword Previous postgres password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.password Previous password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.replicationPassword Previous replication password (set if the password secret was already changed) + ## @param passwordUpdateJob.previousPasswords.existingSecret Name of a secret containing the previous passwords (set if the password secret was already changed) + previousPasswords: + postgresPassword: "" + password: "" + replicationPassword: "" + existingSecret: "" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param passwordUpdateJob.containerSecurityContext.enabled Enabled containers' Security Context + ## @param passwordUpdateJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param passwordUpdateJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param passwordUpdateJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param 
passwordUpdateJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param passwordUpdateJob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param passwordUpdateJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param passwordUpdateJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param passwordUpdateJob.podSecurityContext.enabled Enabled credential init job pods' Security Context + ## @param passwordUpdateJob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param passwordUpdateJob.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param passwordUpdateJob.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param passwordUpdateJob.podSecurityContext.fsGroup Set credential init job pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## @param passwordUpdateJob.extraEnvVars Array containing extra env vars to configure the credential init job + ## For example: + ## extraEnvVars: + ## - name: GF_DEFAULT_INSTANCE_NAME 
+ ## value: my-instance + ## + extraEnvVars: [] + ## @param passwordUpdateJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the credential init job + ## + extraEnvVarsCM: "" + ## @param passwordUpdateJob.extraEnvVarsSecret Secret containing extra env vars to configure the credential init job (in case of sensitive data) + ## + extraEnvVarsSecret: "" + ## @param passwordUpdateJob.extraVolumes Optionally specify extra list of additional volumes for the credential init job + ## + extraVolumes: [] + ## @param passwordUpdateJob.extraVolumeMounts Array of extra volume mounts to be added to the jwt Container (evaluated as template). Normally used with `extraVolumes`. + ## + extraVolumeMounts: [] + ## @param passwordUpdateJob.initContainers Add additional init containers for the mysql Primary pod(s) + ## + initContainers: [] + ## Container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param passwordUpdateJob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "micro" + ## @param passwordUpdateJob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param passwordUpdateJob.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param passwordUpdateJob.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param passwordUpdateJob.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param passwordUpdateJob.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param passwordUpdateJob.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param passwordUpdateJob.annotations [object] Add annotations to the job + ## + annotations: {} + ## @param passwordUpdateJob.podLabels Additional pod labels + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param passwordUpdateJob.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) -livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 +## @section Volume Permissions parameters 
+## -readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + #repository: bitnami/os-shell + #tag: 12-debian-12-r51 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container + ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container + ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container + ## + containerSecurityContext: + seLinuxOptions: {} + runAsUser: 0 + runAsGroup: 0 + runAsNonRoot: false + seccompProfile: + type: RuntimeDefault +## @section Other Parameters +## -## Configure metrics exporter +## @param serviceBindings.enabled Create secret for service binding (Experimental) +## Ref: https://servicebinding.io/service-provider/ +## +serviceBindings: + enabled: false +## Service account for PostgreSQL to use. 
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Creates role for ServiceAccount +## @param rbac.create Create Role and RoleBinding (required for PSP to work) +## +rbac: + create: false + ## @param rbac.rules Custom RBAC rules to set + ## e.g: + ## rules: + ## - apiGroups: + ## - "" + ## resources: + ## - pods + ## verbs: + ## - get + ## - list + ## + rules: [] +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## @param psp.create Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later +## +psp: + create: false +## @section Metrics Parameters ## metrics: + ## @param metrics.enabled Start a prometheus exporter + ## enabled: false - # resources: {} - service: - type: ClusterIP - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "9187" - loadBalancerIP: - serviceMonitor: - enabled: false - additionalLabels: { } - # namespace: monitoring - # interval: 30s - # scrapeTimeout: 10s - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - prometheusRule: - enabled: false - additionalLabels: { } - namespace: "" - rules: [ ] - ## These are just examples rules, please adapt them to your needs. - ## Make sure to constraint the rules to the current postgresql service. - # - alert: HugeReplicationLag - # expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 - # for: 1m - # labels: - # severity: critical - # annotations: - # description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry + ## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository + ## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) + ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy + ## @param metrics.image.pullSecrets Specify image pull secrets + ## image: registry: docker.io - repository: bitnami/postgres-exporter - tag: 0.8.0-debian-10-r4 + #repository: bitnami/postgres-exporter + #tag: 0.17.1-debian-12-r16 + digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName - ## Define additional custom metrics - ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file - # customMetrics: - # pg_database: - # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - # metrics: - # - name: - # usage: "LABEL" - # description: "Name of the database" - # - size_bytes: - # usage: "GAUGE" - # description: "Size of the database in bytes" - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + pullSecrets: [] + ## @param metrics.collectors Control enabled collectors + ## ref: https://github.com/prometheus-community/postgres_exporter#flags + ## Example: + ## collectors: + ## wal: false + collectors: {} + ## @param metrics.customMetrics Define additional custom metrics + ## ref: https://github.com/prometheus-community/postgres_exporter#adding-new-metrics-via-a-config-file-deprecated + ## customMetrics: + ## pg_database: + ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN 
pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + ## metrics: + ## - name: + ## usage: "LABEL" + ## description: "Name of the database" + ## - size_bytes: + ## usage: "GAUGE" + ## description: "Size of the database in bytes" ## - securityContext: - enabled: false + customMetrics: {} + ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter + ## see: https://github.com/prometheus-community/postgres_exporter#environment-variables + ## For example: + ## extraEnvVars: + ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + ## value: "true" + ## + extraEnvVars: [] + ## PostgreSQL Prometheus exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context + ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged + ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + 
containerSecurityContext: + enabled: true + seLinuxOptions: {} runAsUser: 1001 - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) - ## Configure extra options for liveness and readiness probes + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe + ## livenessProbe: enabled: true initialDelaySeconds: 5 @@ -465,7 +1804,13 @@ metrics: timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 - + ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param metrics.readinessProbe.successThreshold Success threshold for readinessProbe + ## readinessProbe: enabled: true 
initialDelaySeconds: 5 @@ -473,3 +1818,131 @@ metrics: timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 + ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers + ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe + ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port + ## + containerPorts: + metrics: 9187 + ## PostgreSQL Prometheus exporter resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Service configuration + ## + service: + ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port + ## + ports: + metrics: 9187 + ## @param metrics.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "postgresql.v1.chart.fullname" .) }}"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ include "postgresql.v1.chart.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] From 39e19d35375545a4ab6a31acdd1a2a699eef5124 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 12 Feb 2026 20:52:07 -0500 Subject: [PATCH 35/47] Fixed download error. 
--- .../modules/k8s_data_layer/postgresql.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index cefa1d2..d6701e9 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -10,6 +10,7 @@ resource "helm_release" "postgresql" { chart = "postgresql" namespace = "core" version = "18.3.0" + verify = false # Temporarily necessery values = [ file("${path.module}/postgresql/postgresql.yaml") From 7cd30236cad190b7aa3ed63c4a9144ea8afa2f87 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 13 Feb 2026 13:18:44 -0500 Subject: [PATCH 36/47] Remove potential chart and folder naming conflicts. --- .../modules/k8s_data_layer/mongodb_apps.tf | 2 +- .../mongodb-apps-values.yaml | 0 .../modules/k8s_data_layer/postgresql.tf | 2 +- .../{postgresql => postgresql_values}/postgresql.yaml | 0 terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf | 2 +- .../k8s_data_layer/{rabbitmq => rabbitmq_values}/rabbitmq.yaml | 0 terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf | 2 +- .../modules/k8s_data_layer/{redis => redis_values}/redis-db.md | 0 .../modules/k8s_data_layer/{redis => redis_values}/redis.yaml | 0 9 files changed, 4 insertions(+), 4 deletions(-) rename terraform-k8s-infrastructure/modules/k8s_data_layer/{mongodb_apps => mongodb_apps_values}/mongodb-apps-values.yaml (100%) rename terraform-k8s-infrastructure/modules/k8s_data_layer/{postgresql => postgresql_values}/postgresql.yaml (100%) rename terraform-k8s-infrastructure/modules/k8s_data_layer/{rabbitmq => rabbitmq_values}/rabbitmq.yaml (100%) rename terraform-k8s-infrastructure/modules/k8s_data_layer/{redis => redis_values}/redis-db.md (100%) rename terraform-k8s-infrastructure/modules/k8s_data_layer/{redis => redis_values}/redis.yaml (100%) diff --git 
a/terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps.tf index 6d334ef..3e15847 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps.tf @@ -5,7 +5,7 @@ resource "helm_release" "mongodb_apps" { version = "3.15.0" values = [ - file("${path.module}/mongodb_apps/mongodb-apps-values.yaml") + file("${path.module}/mongodb_apps_values/mongodb-apps-values.yaml") ] } diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps/mongodb-apps-values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps_values/mongodb-apps-values.yaml similarity index 100% rename from terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps/mongodb-apps-values.yaml rename to terraform-k8s-infrastructure/modules/k8s_data_layer/mongodb_apps_values/mongodb-apps-values.yaml diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index d6701e9..fcc399d 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -13,7 +13,7 @@ resource "helm_release" "postgresql" { verify = false # Temporarily necessery values = [ - file("${path.module}/postgresql/postgresql.yaml") + file("${path.module}/postgresql_values/postgresql.yaml") ] depends_on = [ diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml similarity index 100% rename from terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql/postgresql.yaml rename to terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf 
b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index 5cb57f7..a00027d 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -12,7 +12,7 @@ resource "helm_release" "rabbitmq" { version = "6.18.2" values = [ - file("${path.module}/rabbitmq/rabbitmq.yaml") + file("${path.module}/rabbitmq_values/rabbitmq.yaml") ] depends_on = [ diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq/rabbitmq.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml similarity index 100% rename from terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq/rabbitmq.yaml rename to terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index 094aebf..9f38b23 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -6,7 +6,7 @@ resource "helm_release" "redis" { version = "16.13.2" values = [ - file("${path.module}/redis/redis.yaml") + file("${path.module}/redis_values/redis.yaml") ] // Enable force update and pod recreation diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis-db.md b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis_values/redis-db.md similarity index 100% rename from terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis-db.md rename to terraform-k8s-infrastructure/modules/k8s_data_layer/redis_values/redis-db.md diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis_values/redis.yaml similarity index 100% rename from terraform-k8s-infrastructure/modules/k8s_data_layer/redis/redis.yaml rename to 
terraform-k8s-infrastructure/modules/k8s_data_layer/redis_values/redis.yaml From c49bec4a684ed470f9c3ba34aa074e438d6529aa Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 13 Feb 2026 15:35:13 -0500 Subject: [PATCH 37/47] Attempt to update rabbitmq as well. --- .../modules/k8s_data_layer/rabbitmq.tf | 2 +- .../rabbitmq_values/rabbitmq.yaml | 105 +++++++++--------- 2 files changed, 54 insertions(+), 53 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index a00027d..9dddc10 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -9,7 +9,7 @@ resource "helm_release" "rabbitmq" { repository = "https://charts.bitnami.com/bitnami" chart = "rabbitmq" namespace = "core" - version = "6.18.2" + version = "16.0.14" values = [ file("${path.module}/rabbitmq_values/rabbitmq.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml index ebe7793..cbe351c 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml @@ -47,7 +47,9 @@ image: # schedulerName: ## does your cluster have rbac enabled? assume yes by default -rbacEnabled: true +#rbacEnabled: true +rbac: + create: true ## RabbitMQ should be initialized one by one when building cluster for the first time.
## Therefore, the default value of podManagementPolicy is 'OrderedReady' @@ -62,7 +64,7 @@ rbacEnabled: true podManagementPolicy: OrderedReady ## section of specific values for rabbitmq -rabbitmq: +auth: ## RabbitMQ application username ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## @@ -85,60 +87,60 @@ rabbitmq: ## # rabbitmqClusterNodeName: - ## Value for the RABBITMQ_LOGS environment variable - ## ref: https://www.rabbitmq.com/logging.html#log-file-location - ## - logs: '-' +## Value for the RABBITMQ_LOGS environment variable +## ref: https://www.rabbitmq.com/logging.html#log-file-location +## +logs: '-' - ## RabbitMQ Max File Descriptors - ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables - ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits - ## - setUlimitNofiles: true - ulimitNofiles: '65536' +## RabbitMQ Max File Descriptors +## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables +## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits +## +# setUlimitNofiles: true # No longer used it seems +ulimitNofiles: '65536' - ## RabbitMQ maximum available scheduler threads and online scheduler threads - ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads - ## - maxAvailableSchedulers: 2 - onlineSchedulers: 1 +## RabbitMQ maximum available scheduler threads and online scheduler threads +## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads +## +maxAvailableSchedulers: 2 +onlineSchedulers: 1 - ## Plugins to enable - plugins: "rabbitmq_management" +## Plugins to enable +plugins: "rabbitmq_management" - ## Extra plugins to enable - ## Use this instead of `plugins` to add new plugins - # extraPlugins: "rabbitmq_auth_backend_ldap" +## Extra plugins to 
enable +## Use this instead of `plugins` to add new plugins +# extraPlugins: "rabbitmq_auth_backend_ldap" - ## Clustering settings - clustering: - address_type: hostname - k8s_domain: cluster.local - ## Rebalance master for queues in cluster when new replica is created - ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance - rebalance: false +## Clustering settings +clustering: + address_type: hostname + #k8s_domain: cluster.local # Apparently no longer used + ## Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + rebalance: false - loadDefinition: - enabled: false - secretName: load-definition - - ## environment variables to configure rabbitmq - ## ref: https://www.rabbitmq.com/configure.html#customise-environment - env: { } - - ## Configuration file content: required cluster configuration - ## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead - configuration: |- - # queue master locator - queue_master_locator=min-masters - # enable guest user - loopback_users.guest = false - - ## Configuration file content: extra configuration - ## Use this instead of `configuration` to add more configuration - extraConfiguration: |- - #disk_free_limit.absolute = 50MB - #management.load_definitions = /app/load_definition.json +loadDefinition: + enabled: false + existingSecret: load-definition + +## environment variables to configure rabbitmq +## ref: https://www.rabbitmq.com/configure.html#customise-environment +env: { } + +## Configuration file content: required cluster configuration +## Do not override unless you know what you are doing. 
To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead +configuration: |- + # queue master locator + queue_master_locator=min-masters + # enable guest user + loopback_users.guest = false + +## Configuration file content: extra configuration +## Use this instead of `configuration` to add more configuration +extraConfiguration: |- + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json ## Configuration file content: advanced configuration ## Use this as additional configuraton in classic config format (Erlang term configuration format) @@ -417,8 +419,7 @@ volumePermissions: ## unknown order. ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot ## -forceBoot: - enabled: false +forceBoot: false ## Optionally specify extra secrets to be created by the chart. ## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. From 12965ef7f087ef5a546c4c4ec35984012ebcfdff Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 13 Feb 2026 16:26:00 -0500 Subject: [PATCH 38/47] Use OCI URIs.
--- .../modules/k8s_data_layer/postgresql.tf | 2 +- terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf | 2 +- terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index fcc399d..bfbda25 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -6,7 +6,7 @@ data "kubernetes_secret" "postgresql_core" { } resource "helm_release" "postgresql" { name = "postgresql" - repository = "https://charts.bitnami.com/bitnami" + repository = "oci://registry-1.docker.io/bitnamicharts" chart = "postgresql" namespace = "core" version = "18.3.0" diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index 9dddc10..af12e3e 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -6,7 +6,7 @@ data "kubernetes_secret" "rabbitmq_core" { } resource "helm_release" "rabbitmq" { name = "rabbitmq" - repository = "https://charts.bitnami.com/bitnami" + repository = "oci://registry-1.docker.io/bitnamicharts" chart = "rabbitmq" namespace = "core" version = "16.0.14" diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index 9f38b23..f5996a7 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -1,6 +1,6 @@ resource "helm_release" "redis" { name = "redis" - repository = "https://charts.bitnami.com/bitnami" + repository = "oci://registry-1.docker.io/bitnamicharts" chart = "redis" namespace = "core" version = "16.13.2" From 
0cc579b735f7809af44f8f23fed70f3bfaf46e9e Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 13 Feb 2026 18:22:49 -0500 Subject: [PATCH 39/47] Fix image tags. --- .../k8s_data_layer/postgresql_values/postgresql.yaml | 10 +++++----- .../k8s_data_layer/rabbitmq_values/rabbitmq.yaml | 4 +++- .../modules/k8s_data_layer/redis.tf | 4 +++- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml index 2c3848f..504bec4 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml @@ -23,7 +23,7 @@ global: ## security: ## @param global.security.allowInsecureImages Allows skipping image verification - allowInsecureImages: false + allowInsecureImages: true #postgresql: ## @param global.postgresql.fullnameOverride Full chart name (overrides `fullnameOverride`) ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) @@ -120,8 +120,8 @@ diagnosticMode: ## image: registry: docker.io - repository: bitnami/postgresql - tag: 9.6-debian-10 + repository: bitnamilegacy/postgresql + tag: 9.6.10-r64 digest: "" ## Specify a imagePullPolicy ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images @@ -484,7 +484,7 @@ primary: resources: requests: cpu: 250m - memory: 256mi + memory: 256Mi limits: cpu: 2 memory: 8Gi @@ -959,7 +959,7 @@ readReplicas: resources: requests: cpu: 250m - memory: 256mi + memory: 256Mi limits: cpu: 2 memory: 8Gi diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml index cbe351c..876d615 100644 --- 
a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml @@ -7,6 +7,8 @@ global: imagePullSecrets: - regcred # storageClass: myStorageClass + security: + allowInsecureImages: true ## Bitnami RabbitMQ image version ## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ @@ -14,7 +16,7 @@ global: image: registry: docker.io repository: bitnamilegacy/rabbitmq - tag: 3.7-debian-9 + tag: 3.7.9-r40 ## set to true if you would like to see extra information on logs ## it turns BASH and NAMI debugging in minideb diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index f5996a7..8470b1a 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -1,6 +1,8 @@ resource "helm_release" "redis" { name = "redis" - repository = "oci://registry-1.docker.io/bitnamicharts" + // For some reason the OCI: registry doesn't work here, even though + // it is required for the others? + repository = "https://charts.bitnami.com/bitnami" chart = "redis" namespace = "core" version = "16.13.2" From 28f8be6abdd759f3775cfd5fd4848a74902b33e7 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 13 Feb 2026 23:26:39 -0500 Subject: [PATCH 40/47] Extend helm timeouts to 20 min. 
--- .../modules/k8s_data_layer/postgresql.tf | 1 + terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf | 1 + terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf | 1 + 3 files changed, 3 insertions(+) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index bfbda25..9b27049 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -11,6 +11,7 @@ resource "helm_release" "postgresql" { namespace = "core" version = "18.3.0" verify = false # Temporarily necessery + timeout = 1200 values = [ file("${path.module}/postgresql_values/postgresql.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index af12e3e..2b59167 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -10,6 +10,7 @@ resource "helm_release" "rabbitmq" { chart = "rabbitmq" namespace = "core" version = "16.0.14" + timeout = 1200 values = [ file("${path.module}/rabbitmq_values/rabbitmq.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index 8470b1a..acf1fb9 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -6,6 +6,7 @@ resource "helm_release" "redis" { chart = "redis" namespace = "core" version = "16.13.2" + timeout = 1200 values = [ file("${path.module}/redis_values/redis.yaml") From 45ea8059aa25b1a40f098068db88317308643f06 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Fri, 20 Feb 2026 15:33:48 -0500 Subject: [PATCH 41/47] Fix pod name, try resolving hba.conf path issue. 
--- .../modules/k8s_data_layer/postgresql.tf | 1 - .../postgresql_values/postgresql.yaml | 34 +++++++++++-------- .../modules/k8s_data_layer/rabbitmq.tf | 1 - .../modules/k8s_data_layer/redis.tf | 1 - 4 files changed, 20 insertions(+), 17 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index 9b27049..bfbda25 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -11,7 +11,6 @@ resource "helm_release" "postgresql" { namespace = "core" version = "18.3.0" verify = false # Temporarily necessery - timeout = 1200 values = [ file("${path.module}/postgresql_values/postgresql.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml index 504bec4..b01ff08 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml @@ -7,7 +7,7 @@ global: ## @param global.imageRegistry Global Docker image registry ## - imageRegistry: "" + #imageRegistry: "" ## @param global.imagePullSecrets Global Docker registry secret names as an array ## e.g. 
## imagePullSecrets: @@ -15,10 +15,10 @@ global: ## imagePullSecrets: [] ## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) -## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead + ## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead ## - defaultStorageClass: "" - storageClass: "" + #defaultStorageClass: "" + #storageClass: "" ## Security parameters ## security: @@ -66,16 +66,16 @@ global: ## @param kubeVersion Override Kubernetes version ## -kubeVersion: "" +#kubeVersion: "" ## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) ## -nameOverride: "" +#nameOverride: "postgresql" ## @param fullnameOverride String to fully override common.names.fullname template ## -fullnameOverride: "" +fullnameOverride: "postgresql-postgresql" ## @param namespaceOverride String to fully override common.names.namespace ## -namespaceOverride: "" +#namespaceOverride: "" ## @param clusterDomain Kubernetes Cluster Domain ## clusterDomain: cluster.local @@ -320,7 +320,11 @@ primary: ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html ## e.g:# - ## pgHbaConfiguration: |- + #pgHbaConfiguration: |- + # host all all 0.0.0.0/0 scram-sha-256 + # Allow local connections + # local all all trust + ## local all all trust ## host all all localhost trust ## host mydatabase mysuser 192.168.0.0/24 md5 @@ -333,7 +337,9 @@ primary: ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf ## - # extendedConfiguration: "" + #extendedConfiguration: |- + # hba_file = '/bitnami/postgresql/conf/pg_hba.conf' + ## @param 
primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration ## NOTE: `primary.extendedConfiguration` will be ignored ## @@ -498,7 +504,7 @@ primary: ## @param primary.podSecurityContext.fsGroup Group ID for the pod ## podSecurityContext: - enabled: false + enabled: true fsGroupChangePolicy: Always sysctls: [] supplementalGroups: [] @@ -1613,8 +1619,8 @@ volumePermissions: ## image: registry: docker.io - #repository: bitnami/os-shell - #tag: 12-debian-12-r51 + repository: bitnamilegacy/os-shell + tag: 12-debian-12-r51 digest: "" pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -1672,7 +1678,7 @@ serviceBindings: serviceAccount: ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod ## - create: true + create: false ## @param serviceAccount.name The name of the ServiceAccount to use. ## If not set and create is true, a name is generated using the common.names.fullname template ## diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index 2b59167..af12e3e 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -10,7 +10,6 @@ resource "helm_release" "rabbitmq" { chart = "rabbitmq" namespace = "core" version = "16.0.14" - timeout = 1200 values = [ file("${path.module}/rabbitmq_values/rabbitmq.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf index acf1fb9..8470b1a 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/redis.tf @@ -6,7 +6,6 @@ resource "helm_release" "redis" { chart = "redis" namespace = "core" version = "16.13.2" - timeout = 1200 values = [ file("${path.module}/redis_values/redis.yaml") From 
0439277dfb89f6ec935d5cc430bc33e176f5eb1c Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Mon, 23 Feb 2026 20:53:17 -0500 Subject: [PATCH 42/47] Vend older postgresql helm chart and fix values / pvc. --- scripts/README.md | 4 + scripts/pvc-debugger.yaml | 17 + .../charts/postgresql/Chart.yaml | 22 + .../charts/postgresql/README.md | 567 +++++ .../charts/postgresql/ci/default-values.yaml | 1 + .../ci/shmvolume-disabled-values.yaml | 2 + .../charts/postgresql/files/README.md | 1 + .../charts/postgresql/files/conf.d/README.md | 4 + .../docker-entrypoint-initdb.d/README.md | 3 + .../charts/postgresql/templates/NOTES.txt | 60 + .../charts/postgresql/templates/_helpers.tpl | 420 ++++ .../postgresql/templates/configmap.yaml | 26 + .../templates/extended-config-configmap.yaml | 21 + .../templates/initialization-configmap.yaml | 24 + .../templates/metrics-configmap.yaml | 13 + .../postgresql/templates/metrics-svc.yaml | 26 + .../postgresql/templates/networkpolicy.yaml | 38 + .../postgresql/templates/prometheusrule.yaml | 23 + .../charts/postgresql/templates/secrets.yaml | 23 + .../postgresql/templates/serviceaccount.yaml | 11 + .../postgresql/templates/servicemonitor.yaml | 33 + .../templates/statefulset-slaves.yaml | 299 +++ .../postgresql/templates/statefulset.yaml | 458 ++++ .../postgresql/templates/svc-headless.yaml | 19 + .../charts/postgresql/templates/svc-read.yaml | 31 + .../charts/postgresql/templates/svc.yaml | 38 + .../charts/postgresql/values-production.yaml | 520 ++++ .../charts/postgresql/values.schema.json | 103 + .../charts/postgresql/values.yaml | 526 ++++ .../modules/k8s_data_layer/postgresql.tf | 8 +- .../postgresql_values/postgresql.yaml | 2239 +++-------------- .../modules/k8s_data_layer/rabbitmq.tf | 7 +- 32 files changed, 3721 insertions(+), 1866 deletions(-) create mode 100644 scripts/README.md create mode 100644 scripts/pvc-debugger.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/Chart.yaml create mode 
100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/README.md create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/default-values.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/shmvolume-disabled-values.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/README.md create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/conf.d/README.md create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/docker-entrypoint-initdb.d/README.md create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/NOTES.txt create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/_helpers.tpl create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/configmap.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/extended-config-configmap.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/initialization-configmap.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-configmap.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-svc.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/networkpolicy.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/prometheusrule.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/secrets.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/serviceaccount.yaml create mode 100644 
terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/servicemonitor.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset-slaves.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-headless.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-read.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values-production.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.schema.json create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.yaml diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..e8811bb --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,4 @@ +# PVC Debugger + +export PVC_NAME= +envsubst < pvc-debugger.yaml | kubectl apply -n -f - diff --git a/scripts/pvc-debugger.yaml b/scripts/pvc-debugger.yaml new file mode 100644 index 0000000..dcf0512 --- /dev/null +++ b/scripts/pvc-debugger.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Pod +metadata: + name: volume-debugger +spec: + volumes: + - name: volume-to-debug + persistentVolumeClaim: + claimName: ${PVC_NAME} # Replace with your PVC name + containers: + - name: debugger + image: busybox:stable + command: ['sleep', '3600'] # Keeps the pod running + volumeMounts: + - mountPath: "/data" # The path where the volume will be mounted + name: volume-to-debug + restartPolicy: Never diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/Chart.yaml 
b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/Chart.yaml new file mode 100644 index 0000000..30953ee --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +name: postgresql +version: 8.6.4 +appVersion: 11.7.0 +description: Chart for PostgreSQL, an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. +keywords: + - postgresql + - postgres + - database + - sql + - replication + - cluster +home: https://www.postgresql.org/ +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +sources: + - https://github.com/bitnami/bitnami-docker-postgresql +maintainers: + - name: Bitnami + email: containers@bitnami.com + - name: desaintmartin + email: cedric@desaintmartin.fr +engine: gotpl diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/README.md b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/README.md new file mode 100644 index 0000000..4a7c501 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/README.md @@ -0,0 +1,567 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR; + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. 
This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | 
Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match agains the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slaves replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-postgres-password` which is the password for `postgresqlUsername` when it is different of `postgres`, `postgresql-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be sed to authenticate on LDAP. 
| `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) | _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL admin user | `postgres` | +| `postgresqlPassword` | PostgreSQL admin password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUsername` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUsername` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service, the value is evaluated as a template. | {} | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | [] | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. 
| `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.labels` | Map of labels to add to the 
statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAcccount.name` | Name of existing service account | `nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `nil` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the 
probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. 
If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.image.registry` | PostgreSQL Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. 
+ +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=12.0.0` + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. + +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. 
+ +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. 
an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use that data, please convert it to SQL and import it after `helm install` has finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. 
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +helm install postgres \ + --set image.repository=postgres \ + --set image.tag=10.6 \ + --set postgresqlDataDir=/data/pgdata \ + --set persistence.mountPath=/data/ \ + bitnami/postgresql +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. + +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. 
The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + + - protobuf + - protobuf-c + - json-c + - geos + - proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```bash +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... +INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. 
+``` +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. + +#### Breaking changes + +- `affinty` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. 
You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + + ```console +$ kubectl get svc + ``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After run above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing to the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/default-values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/shmvolume-disabled-values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100644 index 0000000..347d3b4 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/README.md b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/README.md new file mode 100644 index 0000000..1813a2f --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use it as a config map. 
diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/conf.d/README.md b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/conf.d/README.md new file mode 100644 index 0000000..184c187 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as a config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100644 index 0000000..cba3809 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. 
\ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/NOTES.txt b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/NOTES.txt new file mode 100644 index 0000000..3b5e6c6 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,60 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . 
}} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . 
}} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/_helpers.tpl b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/_helpers.tpl new file mode 100644 index 0000000..3ee5572 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,420 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} 
+{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" .Values.global.postgresql.existingSecret -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" .Values.existingSecret -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if .Values.global.postgresql.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "postgresql.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "postgresql.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/configmap.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/configmap.yaml new file mode 100644 index 0000000..d2178c0 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/extended-config-configmap.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100644 index 0000000..8a41195 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . 
}} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/initialization-configmap.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/initialization-configmap.yaml new file mode 100644 index 0000000..8eb5e05 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-configmap.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-configmap.yaml new file mode 100644 index 0000000..524aa2f --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-svc.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-svc.yaml new file mode 100644 index 0000000..c610f09 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: +{{ toYaml .Values.metrics.service.annotations | indent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/networkpolicy.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/networkpolicy.yaml new file mode 100644 index 0000000..ea1fc9b --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,38 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . 
}} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/prometheusrule.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/prometheusrule.yaml new file mode 100644 index 0000000..44f1242 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/secrets.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/secrets.yaml new file mode 100644 index 0000000..094d18b --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/serviceaccount.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/serviceaccount.yaml new file mode 100644 index 0000000..27e5b51 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . 
}} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + name: {{ template "postgresql.fullname" . }} +{{- end }} \ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/servicemonitor.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/servicemonitor.yaml new file mode 100644 index 0000000..f3a529a --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset-slaves.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100644 index 0000000..3290ff7 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,299 @@ +{{- if .Values.replication.enabled }} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- with .Values.slave.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{ if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . 
}} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . 
}} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset.yaml new file mode 100644 index 0000000..3390be2 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,458 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.master.labels }} +{{ toYaml . 
| indent 4 }} +{{- end }} +{{- with .Values.master.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + role: master +{{- with .Values.master.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.master.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: +{{ toYaml .Values.master.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: +{{ toYaml .Values.master.affinity | indent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: +{{ toYaml .Values.master.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . 
) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled)) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{ if .Values.persistence.enabled }} + mkdir -p {{ .Values.persistence.mountPath }}/data + chmod 700 {{ .Values.persistence.mountPath }}/data + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: + {{- else }} + securityContext: + runAsUser: {{ .Values.volumePermissions.securityContext.runAsUser }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} +{{ tpl .Values.master.extraInitContainers . 
| indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: .Values.initdbPassword + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.replication.enabled }} + - name: POSTGRES_REPLICATION_MODE + value: "master" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "postgresql.tplValue" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end}} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote}} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ .Values.extraEnvVarsCM }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d {{ (include "postgresql.database" .) | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - 
name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "postgresql.tplValue" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.metrics.securityContext.runAsUser }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=disable" (int (include "postgresql.port" .)) $database | quote }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . 
}} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . 
}} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-headless.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-headless.yaml new file mode 100644 index 0000000..5c71f46 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-read.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-read.yaml new file mode 100644 index 0000000..d9492e2 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,31 @@ +{{- if .Values.replication.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . 
}} + targetPort: tcp-postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . }} + release: {{ .Release.Name | quote }} + role: slave +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc.yaml new file mode 100644 index 0000000..0baea4a --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/templates/svc.yaml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + app: {{ template "postgresql.name" . }} + chart: {{ template "postgresql.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.service.annotations }} + annotations: +{{ tpl (toYaml .) $ | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} + {{- end }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: + app: {{ template "postgresql.name" . 
}} + release: {{ .Release.Name | quote }} + role: master diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values-production.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values-production.yaml new file mode 100644 index 0000000..2032392 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values-production.yaml @@ -0,0 +1,520 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r26 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. 
"stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL Master 
Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. 
When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. 
+ # - alert: HugeReplicationLag + # expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r42 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 
+ successThreshold: 1 diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.schema.json b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.schema.json new file mode 100644 index 0000000..ac2de6e --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + "replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "condition": false, + "value": "replication.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of 
the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.yaml new file mode 100644 index 0000000..5b035ef --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/postgresql/values.yaml @@ -0,0 +1,526 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.7.0-debian-10-r26 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. 
"stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "off" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) 
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ingored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: 
do-something + # image: busybox + # command: ['do', 'something'] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + sidecars: [] + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. 
When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter +## +metrics: + enabled: false + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## These are just examples 
rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: HugeReplicationLag + # expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r42 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + 
readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf index bfbda25..5d03372 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql.tf @@ -6,11 +6,11 @@ data "kubernetes_secret" "postgresql_core" { } resource "helm_release" "postgresql" { name = "postgresql" - repository = "oci://registry-1.docker.io/bitnamicharts" - chart = "postgresql" + #repository = "oci://registry-1.docker.io/bitnamicharts" + chart = "${path.module}/charts/postgresql" namespace = "core" - version = "18.3.0" - verify = false # Temporarily necessery + #version = "18.3.0" + #verify = false # Temporarily necessery values = [ file("${path.module}/postgresql_values/postgresql.yaml") diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml index b01ff08..13aaf15 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/postgresql_values/postgresql.yaml @@ -1,596 +1,299 @@ -# Copyright Broadcom, Inc. All Rights Reserved. 
-# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Please, note that this will override the parameters, including dependencies, configured to use the global value +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets ## global: - ## @param global.imageRegistry Global Docker image registry - ## - #imageRegistry: "" - ## @param global.imagePullSecrets Global Docker registry secret names as an array - ## e.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - ## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) - ## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead - ## - #defaultStorageClass: "" - #storageClass: "" - ## Security parameters - ## - security: - ## @param global.security.allowInsecureImages Allows skipping image verification - allowInsecureImages: true - #postgresql: - ## @param global.postgresql.fullnameOverride Full chart name (overrides `fullnameOverride`) - ## @param global.postgresql.auth.postgresPassword Password for the "postgres" admin user (overrides `auth.postgresPassword`) - ## @param global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`) - ## @param global.postgresql.auth.password Password for the custom user to create (overrides `auth.password`) - ## @param global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`) - ## @param global.postgresql.auth.existingSecret Name of existing secret to use for PostgreSQL credentials (overrides `auth.existingSecret`). - ## @param global.postgresql.auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.adminPasswordKey`). 
Only used when `global.postgresql.auth.existingSecret` is set. - ## @param global.postgresql.auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.userPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. - ## @param global.postgresql.auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials (overrides `auth.secretKeys.replicationPasswordKey`). Only used when `global.postgresql.auth.existingSecret` is set. - ## - #fullnameOverride: "" - - #auth: - # postgresPassword: "" - # username: "" - # password: "" - # database: "" - # existingSecret: "" - # secretKeys: - # adminPasswordKey: "" - # userPasswordKey: "" - # replicationPasswordKey: "" - ## @param global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`) - ## - #service: - # ports: - # postgresql: "" - ## Compatibility adaptations for Kubernetes platforms - ## - compatibility: - ## Compatibility adaptations for Openshift - ## - openshift: - ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) - ## - adaptSecurityContext: auto -## @section Common parameters -## - -## @param kubeVersion Override Kubernetes version -## -#kubeVersion: "" -## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) -## -#nameOverride: "postgresql" -## @param fullnameOverride String to fully override common.names.fullname template -## -fullnameOverride: "postgresql-postgresql" -## @param namespaceOverride String to fully override common.names.namespace -## -#namespaceOverride: "" -## @param clusterDomain Kubernetes Cluster Domain -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release (evaluated as a template) -## -extraDeploy: [] -## @param commonLabels Add labels to all the deployed resources -## -commonLabels: {} -## @param commonAnnotations Add annotations to all the deployed resources -## -commonAnnotations: {} -## @param secretAnnotations Add annotations to the secrets -## -secretAnnotations: {} -## Enable diagnostic mode in the statefulset -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the statefulset - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the statefulset - ## - args: - - infinity -## @section PostgreSQL common parameters -## + postgresql: { } +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass ## Bitnami PostgreSQL image version ## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ -## @param image.registry [default: REGISTRY_NAME] PostgreSQL image registry -## @param image.repository [default: 
REPOSITORY_NAME/postgresql] PostgreSQL image repository -## @skip image.tag PostgreSQL image tag (immutable tags are recommended) -## @param image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag -## @param image.pullPolicy PostgreSQL image pull policy -## @param image.pullSecrets Specify image pull secrets -## @param image.debug Specify if debug values should be set ## image: registry: docker.io repository: bitnamilegacy/postgresql tag: 9.6.10-r64 - digest: "" ## Specify a imagePullPolicy - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName ## - pullSecrets: [] + # pullSecrets: + # - myRegistryKeySecretName + ## Set to true if you would like to see extra information on logs - ## + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging debug: false -## Authentication parameters -## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#setting-the-root-password-on-first-run -## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-on-first-run -## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#creating-a-database-user-on-first-run + +## String to partially override postgresql.fullname template (will maintain the release name) ## -auth: - ## @param auth.enablePostgresUser Assign a password to the "postgres" admin user. 
Otherwise, remote access will be blocked for this user - ## - enablePostgresUser: true - ## @param auth.postgresPassword Password for the "postgres" admin user. Ignored if `auth.existingSecret` is provided - ## - # postgresPassword: "" - ## @param auth.username Name for a custom user to create - ## - # username: "" - ## @param auth.password Password for the custom user to create. Ignored if `auth.existingSecret` is provided - ## - # password: "" - ## @param auth.database Name for a custom database to create - ## - # database: "" - ## @param auth.replicationUsername Name of the replication user - ## - # replicationUsername: repl_user - ## @param auth.replicationPassword Password for the replication user. Ignored if `auth.existingSecret` is provided - ## - # replicationPassword: "" - ## @param auth.existingSecret Name of existing secret to use for PostgreSQL credentials. `auth.postgresPassword`, `auth.password`, and `auth.replicationPassword` will be ignored and picked up from this secret. The secret might also contains the key `ldap-password` if LDAP is enabled. `ldap.bind_password` will be ignored and picked from this secret in this case. - ## - existingSecret: "postgresql" - ## @param auth.secretKeys.adminPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## @param auth.secretKeys.userPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. - ## @param auth.secretKeys.replicationPasswordKey Name of key in existing secret to use for PostgreSQL credentials. Only used when `auth.existingSecret` is set. 
- ## - secretKeys: - adminPasswordKey: postgres-password - userPasswordKey: password - replicationPasswordKey: replication-password - ## @param auth.usePasswordFiles Mount credentials as a files instead of using an environment variable - ## - usePasswordFiles: true -## @param architecture PostgreSQL architecture (`standalone` or `replication`) +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ ## -architecture: standalone -## Replication configuration -## Ignored if `architecture` is `standalone` +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + replication: - ## @param replication.synchronousCommit Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` - ## @param replication.numSynchronousReplicas Number of replicas that will have synchronous replication. Note: Cannot be greater than `readReplicas.replicaCount`. - ## ref: https://www.postgresql.org/docs/current/runtime-config-wal.html#GUC-SYNCHRONOUS-COMMIT - ## - synchronousCommit: "off" - numSynchronousReplicas: 0 - ## @param replication.applicationName Cluster application name. 
Useful for advanced replication settings - ## + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 0 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + numSynchronousReplicas: 1 + ## Replication Cluster application name. Useful for defining multiple replication policies applicationName: my_application -## @param containerPorts.postgresql PostgreSQL container port + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run ## -containerPorts: - postgresql: 5432 -## Audit settings -## https://github.com/bitnami/containers/tree/main/bitnami/postgresql#auditing -## @param audit.logHostname Log client hostnames -## @param audit.logConnections Add client log-in operations to the log file -## @param audit.logDisconnections Add client log-outs operations to the log file -## @param audit.pgAuditLog Add operations to log using the pgAudit extension -## @param audit.pgAuditLogCatalog Log catalog using pgAudit -## @param audit.clientMinMessages Message log level to share with the user -## @param audit.logLinePrefix Template for log line prefix (default if not set) -## @param audit.logTimezone Timezone for 
the log timestamps +# postgresqlPassword: + +## PostgreSQL password using existing secret +existingSecret: postgresql + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /opt/bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf ## -audit: - logHostname: false - logConnections: false - logDisconnections: false - pgAuditLog: "" - pgAuditLogCatalog: "off" - clientMinMessages: error - logLinePrefix: "" - logTimezone: "" +# postgresqlExtendedConf: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+ +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + ## LDAP configuration -## @param ldap.enabled Enable LDAP support -## @param ldap.server IP address or name of the LDAP server. -## @param ldap.port Port number on the LDAP server to connect to -## @param ldap.prefix String to prepend to the user name when forming the DN to bind -## @param ldap.suffix String to append to the user name when forming the DN to bind -## DEPRECATED ldap.baseDN It will removed in a future, please use 'ldap.basedn' instead -## DEPRECATED ldap.bindDN It will removed in a future, please use 'ldap.binddn' instead -## DEPRECATED ldap.bind_password It will removed in a future, please use 'ldap.bindpw' instead -## @param ldap.basedn Root DN to begin the search for the user in -## @param ldap.binddn DN of user to bind to LDAP -## @param ldap.bindpw Password for the user to bind to LDAP -## DEPRECATED ldap.search_attr It will removed in a future, please use 'ldap.searchAttribute' instead -## DEPRECATED ldap.search_filter It will removed in a future, please use 'ldap.searchFilter' instead -## @param ldap.searchAttribute Attribute to match against the user name in the search -## @param ldap.searchFilter The search filter to use when doing search+bind authentication -## @param ldap.scheme Set to `ldaps` to use LDAPS -## DEPRECATED ldap.tls as string is deprecated, please use 'ldap.tls.enabled' instead -## @param ldap.tls.enabled Se 
to true to enable TLS encryption ## ldap: enabled: false + url: "" server: "" port: "" prefix: "" suffix: "" - basedn: "" - binddn: "" - bindpw: "" - searchAttribute: "" - searchFilter: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" scheme: "" - tls: - enabled: false - ## @param ldap.uri LDAP URL beginning in the form `ldap[s]://host[:port]/basedn`. If provided, all the other LDAP parameters will be ignored. - ## Ref: https://www.postgresql.org/docs/current/auth-ldap.html - ## - uri: "" -## @param postgresqlDataDir PostgreSQL data dir folder -## -postgresqlDataDir: /bitnami/postgresql/data -## @param postgresqlSharedPreloadLibraries Shared preload libraries (comma-separated list) -## -postgresqlSharedPreloadLibraries: "pgaudit" -## Start PostgreSQL pod(s) without limitations on shm memory. -## By default docker and containerd (and possibly other container runtimes) limit `/dev/shm` to `64M` -## ref: https://github.com/docker-library/postgres/issues/416 -## ref: https://github.com/containerd/containerd/issues/3654 -## -shmVolume: - ## @param shmVolume.enabled Enable emptyDir volume for /dev/shm for PostgreSQL pod(s) - ## - enabled: true - ## @param shmVolume.sizeLimit Set this to enable a size limit on the shm tmpfs - ## Note: the size of the tmpfs counts against container's memory limit - ## e.g: - ## sizeLimit: 1Gi - ## - sizeLimit: "" -## TLS configuration -## -tls: - ## @param tls.enabled Enable TLS traffic support - ## - enabled: false - ## @param tls.autoGenerated Generate automatically self-signed TLS certificates - ## - autoGenerated: false - ## @param tls.preferServerCiphers Whether to use the server's TLS cipher preferences rather than the client's - ## - preferServerCiphers: true - ## @param tls.certificatesSecret Name of an existing secret that contains the certificates - ## - certificatesSecret: "" - ## @param tls.certFilename Certificate filename - ## - certFilename: "" - ## @param tls.certKeyFilename Certificate 
key filename - ## - certKeyFilename: "" - ## @param tls.certCAFilename CA Certificate filename - ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate - ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html - ## - certCAFilename: "" - ## @param tls.crlFilename File containing a Certificate Revocation List - ## - crlFilename: "" -## @section PostgreSQL Primary parameters -## -primary: - ## @param primary.name Name of the primary database (eg primary, master, leader, ...) - ## - name: primary - ## @param primary.configuration PostgreSQL Primary main configuration to be injected as ConfigMap - ## ref: https://www.postgresql.org/docs/current/static/runtime-config.html - ## - # configuration: "" - ## @param primary.pgHbaConfiguration PostgreSQL Primary client authentication configuration - ## ref: https://www.postgresql.org/docs/current/static/auth-pg-hba-conf.html - ## e.g:# - #pgHbaConfiguration: |- - # host all all 0.0.0.0/0 scram-sha-256 - # Allow local connections - # local all all trust + tls: false - ## local all all trust - ## host all all localhost trust - ## host mydatabase mysuser 192.168.0.0/24 md5 - ## - # pgHbaConfiguration: "" - ## @param primary.existingConfigmap Name of an existing ConfigMap with PostgreSQL Primary configuration - ## NOTE: `primary.configuration` and `primary.pgHbaConfiguration` will be ignored - ## - # existingConfigmap: "" - ## @param primary.extendedConfiguration Extended PostgreSQL Primary configuration (appended to main or default configuration) - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf - ## - #extendedConfiguration: |- - # hba_file = '/bitnami/postgresql/conf/pg_hba.conf' - - ## @param primary.existingExtendedConfigmap Name of an existing ConfigMap with PostgreSQL Primary extended configuration - ## NOTE: `primary.extendedConfiguration` will be ignored - ## - # 
existingExtendedConfigmap: "" - ## Initdb configuration - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#specifying-initdb-arguments - ## - #initdb: - ## @param primary.initdb.args PostgreSQL initdb extra arguments - ## - #args: "" - ## @param primary.initdb.postgresqlWalDir Specify a custom location for the PostgreSQL transaction log - ## - #postgresqlWalDir: "" - ## @param primary.initdb.scripts Dictionary of initdb scripts - ## Specify dictionary of scripts to be run at first boot - ## e.g: - ## scripts: - ## my_init_script.sh: | - ## #!/bin/sh - ## echo "Do something." - ## - #scripts: {} - ## @param primary.initdb.scriptsConfigMap ConfigMap with scripts to be run at first boot - ## NOTE: This will override `primary.initdb.scripts` - ## - #scriptsConfigMap: "" - ## @param primary.initdb.scriptsSecret Secret with scripts to be run at first boot (in case it contains sensitive information) - ## NOTE: This can work along `primary.initdb.scripts` or `primary.initdb.scriptsConfigMap` - ## - #scriptsSecret: "" - ## @param primary.initdb.user Specify the PostgreSQL username to execute the initdb scripts - ## - #user: "" - ## @param primary.initdb.password Specify the PostgreSQL password to execute the initdb scripts - ## - #password: "" - ## Pre-init configuration - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql/#on-container-start - #preInitDb: - ## @param primary.preInitDb.scripts Dictionary of pre-init scripts - ## Specify dictionary of shell scripts to be run before db boot - ## e.g: - ## scripts: - ## my_pre_init_script.sh: | - ## #!/bin/sh - ## echo "Do something." 
- #scripts: {} - ## @param primary.preInitDb.scriptsConfigMap ConfigMap with pre-init scripts to be run - ## NOTE: This will override `primary.preInitDb.scripts` - #scriptsConfigMap: "" - ## @param primary.preInitDb.scriptsSecret Secret with pre-init scripts to be run - ## NOTE: This can work along `primary.preInitDb.scripts` or `primary.preInitDb.scriptsConfigMap` - #scriptsSecret: "" - ## Configure current cluster's primary server to be the standby server in other cluster. - ## This will allow cross cluster replication and provide cross cluster high availability. - ## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. - ## @param primary.standby.enabled Whether to enable current cluster's primary as standby server of another cluster or not - ## @param primary.standby.primaryHost The Host of replication primary in the other cluster - ## @param primary.standby.primaryPort The Port of replication primary in the other cluster - ## - standby: - enabled: false - primaryHost: "" - primaryPort: "" - ## @param primary.extraEnvVars Array with extra environment variables to add to PostgreSQL Primary nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param primary.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL Primary nodes - ## - extraEnvVarsCM: "" - ## @param primary.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL Primary nodes - ## - extraEnvVarsSecret: "" - ## @param primary.command Override default container command (useful when using custom images) - ## - command: [] - ## @param primary.args Override default container args (useful when using custom images) - ## - args: [] - ## Configure extra options for PostgreSQL Primary containers' liveness, readiness and startup probes - ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param primary.livenessProbe.enabled Enable livenessProbe on PostgreSQL Primary containers - ## @param primary.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param primary.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param primary.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param primary.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param primary.livenessProbe.successThreshold Success threshold for livenessProbe - ## - livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param primary.readinessProbe.enabled Enable readinessProbe on PostgreSQL Primary containers - ## @param primary.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param primary.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param primary.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param primary.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param primary.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param primary.startupProbe.enabled Enable startupProbe on PostgreSQL Primary containers - ## @param primary.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param primary.startupProbe.periodSeconds Period seconds for startupProbe - ## @param primary.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param primary.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param primary.startupProbe.successThreshold Success threshold for startupProbe 
- ## - startupProbe: - enabled: false - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param primary.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param primary.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param primary.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param primary.lifecycleHooks for the PostgreSQL Primary container to automate configuration before or after startup +## PostgreSQL service configuration +service: + ## PosgresSQL service type + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport ## - lifecycleHooks: {} - ## PostgreSQL Primary resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param primary.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if primary.resources is set (primary.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + # nodePort: + + ## Provide any additional annotations which may be required. + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + annotations: { } + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer ## - #resourcesPreset: "nano" - ## @param primary.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 2 - memory: 8Gi + # loadBalancerIP: - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.podSecurityContext.enabled Enable security context - ## @param primary.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param primary.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param primary.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param primary.podSecurityContext.fsGroup Group ID for the pod + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service ## - podSecurityContext: + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## If this option is present and value is `true`, +## to the target database pod will be mounted a new tmpfs volume to remove +## this limitation. 
+shmVolume: + enabled: true + chmod: enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param primary.containerSecurityContext.enabled Enabled containers' Security Context - ## @param primary.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param primary.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param primary.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param primary.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param primary.containerSecurityContext.privileged Set container's Security Context privileged - ## @param primary.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param primary.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param primary.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param primary.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: false - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param primary.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## @param primary.hostAliases PostgreSQL primary pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param primary.hostNetwork Specify if host network should 
be enabled for PostgreSQL pod (postgresql primary) - ## - hostNetwork: false - ## @param primary.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) - ## - hostIPC: false - ## @param primary.labels Map of labels to add to the statefulset (postgresql primary) - ## - labels: {} - ## @param primary.annotations Annotations for PostgreSQL primary pods - ## - annotations: {} - ## @param primary.podLabels Map of labels to add to the pods (postgresql primary) - ## - podLabels: {} - ## @param primary.podAnnotations Map of annotations to add to the pods (postgresql primary) - ## - podAnnotations: {} - ## @param primary.podAffinityPreset PostgreSQL primary pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart ## - podAffinityPreset: "" - ## @param primary.podAntiAffinityPreset PostgreSQL primary pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. 
## - podAntiAffinityPreset: soft - ## PostgreSQL Primary node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. ## - #nodeAffinityPreset: - ## @param primary.nodeAffinityPreset.type PostgreSQL primary node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## - #type: "" - ## @param primary.nodeAffinityPreset.key PostgreSQL primary node label key to match Ignored if `primary.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - #key: "" - ## @param primary.nodeAffinityPreset.values PostgreSQL primary node label values to match. Ignored if `primary.affinity` is set. - ## E.g. - ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - #values: [] - ## @param primary.affinity Affinity for PostgreSQL primary pods assignment + subPath: "" + + storageClass: "gp2" + accessModes: + - ReadWriteOnce + size: 75Gi + annotations: { } + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set - ## + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: { } affinity: 
nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -600,1209 +303,161 @@ primary: operator: In values: - core - ## @param primary.nodeSelector Node labels for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param primary.tolerations Tolerations for PostgreSQL primary pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param primary.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param primary.priorityClassName Priority Class to use for each pod (postgresql primary) - ## + tolerations: [ ] + labels: { } + annotations: { } + podLabels: { } + podAnnotations: { } priorityClassName: "" - ## @param primary.schedulerName Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param primary.terminationGracePeriodSeconds Seconds PostgreSQL primary pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods - ## - terminationGracePeriodSeconds: "" - ## @param primary.updateStrategy.type PostgreSQL Primary statefulset strategy type - ## @param primary.updateStrategy.rollingUpdate PostgreSQL Primary statefulset rolling update configuration parameters - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - ## - updateStrategy: - type: RollingUpdate - rollingUpdate: {} - ## @param primary.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL Primary container(s) - ## - extraVolumeMounts: [] - ## @param primary.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL Primary pod(s) - ## - extraVolumes: [] - ## @param primary.sidecars Add additional sidecar containers to the PostgreSQL Primary pod(s) - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param primary.initContainers Add additional init containers to the PostgreSQL Primary pod(s) - ## Example - ## - ## initContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param primary.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param primary.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled - ## @param primary.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. 
Defaults to `1` if both `primary.pdb.minAvailable` and `primary.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param primary.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL Primary pod(s) - ## - extraPodSpec: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param primary.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: false - ## @param primary.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param primary.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
- ## - allowExternalEgress: true - ## @param primary.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param primary.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param primary.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param primary.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## PostgreSQL Primary service configuration - ## - service: - ## @param primary.service.enabled Enable/disable the PostgreSQL primary service - ## - enabled: true - ## @param primary.service.type Kubernetes Service type - ## - type: ClusterIP - ## @param primary.service.ports.postgresql PostgreSQL service port - ## - ports: - postgresql: 5432 - ## Node ports to expose - ## NOTE: choose port between <30000-32767> - ## @param primary.service.nodePorts.postgresql Node port for PostgreSQL - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - postgresql: "" - ## @param primary.service.clusterIP Static clusterIP or None for headless services - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param primary.service.labels Map of labels to add to the primary service - ## - labels: {} - ## @param primary.service.annotations Annotations for PostgreSQL primary service - 
## - annotations: {} - ## @param primary.service.loadBalancerClass Load balancer class if service type is `LoadBalancer` - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class - ## - loadBalancerClass: "" - ## @param primary.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param primary.service.externalTrafficPolicy Enable client source IP preservation - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param primary.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param primary.service.extraPorts Extra ports to expose in the PostgreSQL primary service - ## - extraPorts: [] - ## @param primary.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param primary.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param primary.service.headless.annotations Additional custom annotations for headless PostgreSQL primary service - ## - annotations: {} - ## PostgreSQL Primary persistence 
configuration + ## Additional PostgreSQL Master Volume mounts ## - persistence: - ## @param primary.persistence.enabled Enable PostgreSQL Primary data persistence using PVC - ## - enabled: true - ## @param primary.persistence.volumeName Name to assign the volume - ## - volumeName: "data" - ## @param primary.persistence.existingClaim Name of an existing PVC to use - ## - #existingClaim: "" - ## @param primary.persistence.mountPath The path the volume will be mounted at - ## Note: useful when using custom PostgreSQL images - ## - mountPath: /bitnami/postgresql - ## @param primary.persistence.subPath The subdirectory of the volume to mount to - ## Useful in dev environments and one PV for multiple services - ## - #subPath: "" - ## @param primary.persistence.storageClass PVC Storage Class for PostgreSQL Primary data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
(gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "gp2" - ## @param primary.persistence.accessModes PVC Access Mode for PostgreSQL volume - ## - accessModes: - - ReadWriteOnce - ## @param primary.persistence.size PVC Storage Request for PostgreSQL volume - ## - size: 75Gi - ## @param primary.persistence.annotations Annotations for the PVC - ## - annotations: {} - ## @param primary.persistence.labels Labels for the PVC - ## - labels: {} - ## @param primary.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param primary.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## PostgreSQL Primary Persistent Volume Claim Retention Policy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + extraVolumeMounts: [ ] + ## Additional PostgreSQL Master Volumes ## - persistentVolumeClaimRetentionPolicy: - ## @param primary.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for Primary Statefulset - ## - enabled: true - ## @param primary.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## - whenScaled: Retain - ## @param primary.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - whenDeleted: Retain -## @section PostgreSQL read only replica parameters (only used when `architecture` is set to `replication`) + extraVolumes: [ ] + ## -readReplicas: - ## @param readReplicas.name Name of the read replicas database (eg secondary, slave, ...) 
- ## - name: read - ## @param readReplicas.replicaCount Number of PostgreSQL read only replicas - ## - replicaCount: 1 - ## @param readReplicas.extendedConfiguration Extended PostgreSQL read only replicas configuration (appended to main or default configuration) - ## ref: https://github.com/bitnami/containers/tree/main/bitnami/postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf - ## - extendedConfiguration: "" - ## @param readReplicas.extraEnvVars Array with extra environment variables to add to PostgreSQL read only nodes - ## e.g: - ## extraEnvVars: - ## - name: FOO - ## value: "bar" - ## - extraEnvVars: [] - ## @param readReplicas.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for PostgreSQL read only nodes - ## - extraEnvVarsCM: "" - ## @param readReplicas.extraEnvVarsSecret Name of existing Secret containing extra env vars for PostgreSQL read only nodes - ## - extraEnvVarsSecret: "" - ## @param readReplicas.command Override default container command (useful when using custom images) - ## - command: [] - ## @param readReplicas.args Override default container args (useful when using custom images) - ## - args: [] - ## Configure extra options for PostgreSQL read only containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param readReplicas.livenessProbe.enabled Enable livenessProbe on PostgreSQL read only containers - ## @param readReplicas.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param readReplicas.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param readReplicas.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param readReplicas.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param readReplicas.livenessProbe.successThreshold Success threshold for livenessProbe - ## - 
livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param readReplicas.readinessProbe.enabled Enable readinessProbe on PostgreSQL read only containers - ## @param readReplicas.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param readReplicas.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param readReplicas.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param readReplicas.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param readReplicas.readinessProbe.successThreshold Success threshold for readinessProbe - ## - readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - ## @param readReplicas.startupProbe.enabled Enable startupProbe on PostgreSQL read only containers - ## @param readReplicas.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param readReplicas.startupProbe.periodSeconds Period seconds for startupProbe - ## @param readReplicas.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param readReplicas.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param readReplicas.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param readReplicas.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param readReplicas.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param readReplicas.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param readReplicas.lifecycleHooks for the 
PostgreSQL read only container to automate configuration before or after startup - ## - lifecycleHooks: {} - ## PostgreSQL read only resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param readReplicas.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if readReplicas.resources is set (readReplicas.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - # resourcesPreset: "nano" - ## @param readReplicas.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: - requests: - cpu: 250m - memory: 256Mi - limits: - cpu: 2 - memory: 8Gi - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.podSecurityContext.enabled Enable security context - ## @param readReplicas.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param readReplicas.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param readReplicas.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param readReplicas.podSecurityContext.fsGroup Group ID for the pod - ## - podSecurityContext: - enabled: false - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param readReplicas.containerSecurityContext.enabled Enabled containers' Security Context - ## @param readReplicas.containerSecurityContext.seLinuxOptions 
[object,nullable] Set SELinux options in container - ## @param readReplicas.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param readReplicas.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param readReplicas.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param readReplicas.containerSecurityContext.privileged Set container's Security Context privileged - ## @param readReplicas.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param readReplicas.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param readReplicas.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param readReplicas.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: false - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param readReplicas.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## @param readReplicas.hostAliases PostgreSQL read only pods host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param readReplicas.hostNetwork Specify if host network should be enabled for PostgreSQL pod (PostgreSQL read only) - ## - hostNetwork: false - ## @param readReplicas.hostIPC Specify if host IPC should be enabled for PostgreSQL pod (postgresql primary) - ## - hostIPC: false - ## @param readReplicas.labels Map of labels to add to the statefulset (PostgreSQL read only) - ## - labels: {} - ## @param 
readReplicas.annotations Annotations for PostgreSQL read only pods - ## - annotations: {} - ## @param readReplicas.podLabels Map of labels to add to the pods (PostgreSQL read only) - ## - podLabels: {} - ## @param readReplicas.podAnnotations Map of annotations to add to the pods (PostgreSQL read only) - ## - podAnnotations: {} - ## @param readReplicas.podAffinityPreset PostgreSQL read only pod affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAffinityPreset: "" - ## @param readReplicas.podAntiAffinityPreset PostgreSQL read only pod anti-affinity preset. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity - ## - podAntiAffinityPreset: soft - ## PostgreSQL read only node affinity preset - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity - ## - nodeAffinityPreset: - ## @param readReplicas.nodeAffinityPreset.type PostgreSQL read only node affinity preset type. Ignored if `primary.affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param readReplicas.nodeAffinityPreset.key PostgreSQL read only node label key to match Ignored if `primary.affinity` is set. - ## E.g. - ## key: "kubernetes.io/e2e-az-name" - ## - key: "" - ## @param readReplicas.nodeAffinityPreset.values PostgreSQL read only node label values to match. Ignored if `primary.affinity` is set. - ## E.g. 
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] - ## @param readReplicas.affinity Affinity for PostgreSQL read only pods assignment +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: primary.podAffinityPreset, primary.podAntiAffinityPreset, and primary.nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param readReplicas.nodeSelector Node labels for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} - ## @param readReplicas.tolerations Tolerations for PostgreSQL read only pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## @param readReplicas.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods - ## - topologySpreadConstraints: [] - ## @param readReplicas.priorityClassName Priority Class to use for each pod (PostgreSQL read only) - ## + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + nodeSelector: { } + affinity: { } + tolerations: [ ] + labels: { } + annotations: { } + podLabels: { } + podAnnotations: { } priorityClassName: "" - ## @param readReplicas.schedulerName Use an alternate scheduler, e.g. "stork". 
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - schedulerName: "" - ## @param readReplicas.terminationGracePeriodSeconds Seconds PostgreSQL read only pod needs to terminate gracefully - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## Additional PostgreSQL Slave Volume mounts ## - terminationGracePeriodSeconds: "" - ## @param readReplicas.updateStrategy.type PostgreSQL read only statefulset strategy type - ## @param readReplicas.updateStrategy.rollingUpdate PostgreSQL read only statefulset rolling update configuration parameters - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + extraVolumeMounts: [ ] + ## Additional PostgreSQL Slave Volumes ## - updateStrategy: - type: RollingUpdate - rollingUpdate: {} - ## @param readReplicas.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the PostgreSQL read only container(s) - ## - extraVolumeMounts: [] - ## @param readReplicas.extraVolumes Optionally specify extra list of additional volumes for the PostgreSQL read only pod(s) - ## - extraVolumes: [] - ## @param readReplicas.sidecars Add additional sidecar containers to the PostgreSQL read only pod(s) - ## For example: - ## sidecars: - ## - name: your-image-name - ## image: your-image - ## imagePullPolicy: Always - ## ports: - ## - name: portname - ## containerPort: 1234 - ## - sidecars: [] - ## @param readReplicas.initContainers Add additional init containers to the PostgreSQL read only pod(s) - ## Example - ## - ## initContainers: - ## - name: do-something - ## image: busybox - ## command: ['do', 'something'] - ## - initContainers: [] - ## Pod Disruption Budget configuration - ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb - ## @param readReplicas.pdb.create Enable/disable a Pod Disruption Budget creation - ## @param readReplicas.pdb.minAvailable Minimum number/percentage of pods 
that should remain scheduled - ## @param readReplicas.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `readReplicas.pdb.minAvailable` and `readReplicas.pdb.maxUnavailable` are empty. - ## - pdb: - create: true - minAvailable: "" - maxUnavailable: "" - ## @param readReplicas.extraPodSpec Optionally specify extra PodSpec for the PostgreSQL read only pod(s) - ## - extraPodSpec: {} - ## Network Policies - ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ - ## - networkPolicy: - ## @param readReplicas.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - ## @param readReplicas.networkPolicy.allowExternal Don't require server label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## server label will have network access to the ports server is listening - ## on. When true, server will accept connections from any source - ## (with the correct destination port). - ## - allowExternal: true - ## @param readReplicas.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
- ## - allowExternalEgress: true - ## @param readReplicas.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraIngress: - ## - ports: - ## - port: 1234 - ## from: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - extraIngress: [] - ## @param readReplicas.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy - ## e.g: - ## extraEgress: - ## - ports: - ## - port: 1234 - ## to: - ## - podSelector: - ## - matchLabels: - ## - role: frontend - ## - podSelector: - ## - matchExpressions: - ## - key: role - ## operator: In - ## values: - ## - frontend - ## - extraEgress: [] - ## @param readReplicas.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces - ## @param readReplicas.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces - ## - ingressNSMatchLabels: {} - ingressNSPodMatchLabels: {} - ## PostgreSQL read only service configuration - ## - service: - ## @param readReplicas.service.type Kubernetes Service type - ## - type: ClusterIP - ## @param readReplicas.service.ports.postgresql PostgreSQL service port - ## - ports: - postgresql: 5432 - ## Node ports to expose - ## NOTE: choose port between <30000-32767> - ## @param readReplicas.service.nodePorts.postgresql Node port for PostgreSQL - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - postgresql: "" - ## @param readReplicas.service.clusterIP Static clusterIP or None for headless services - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param readReplicas.service.labels Map of labels to add to the read service - ## - labels: {} - ## @param readReplicas.service.annotations Annotations for PostgreSQL read only service - ## - annotations: {} - ## @param 
readReplicas.service.loadBalancerClass Load balancer class if service type is `LoadBalancer` - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class - ## - loadBalancerClass: "" - ## @param readReplicas.service.loadBalancerIP Load balancer IP if service type is `LoadBalancer` - ## Set the LoadBalancer service type to internal only - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param readReplicas.service.externalTrafficPolicy Enable client source IP preservation - ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param readReplicas.service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param readReplicas.service.extraPorts Extra ports to expose in the PostgreSQL read only service - ## - extraPorts: [] - ## @param readReplicas.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param readReplicas.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Headless service properties - ## - headless: - ## @param readReplicas.service.headless.annotations Additional custom annotations for headless PostgreSQL read only service - ## - annotations: {} - ## PostgreSQL read only 
persistence configuration - ## - persistence: - ## @param readReplicas.persistence.enabled Enable PostgreSQL read only data persistence using PVC - ## - enabled: true - ## @param readReplicas.persistence.existingClaim Name of an existing PVC to use - ## - existingClaim: "" - ## @param readReplicas.persistence.mountPath The path the volume will be mounted at - ## Note: useful when using custom PostgreSQL images - ## - mountPath: /bitnami/postgresql - ## @param readReplicas.persistence.subPath The subdirectory of the volume to mount to - ## Useful in dev environments and one PV for multiple services - ## - subPath: "" - ## @param readReplicas.persistence.storageClass PVC Storage Class for PostgreSQL read only data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - storageClass: "" - ## @param readReplicas.persistence.accessModes PVC Access Mode for PostgreSQL volume - ## - accessModes: - - ReadWriteOnce - ## @param readReplicas.persistence.size PVC Storage Request for PostgreSQL volume - ## - size: 8Gi - ## @param readReplicas.persistence.annotations Annotations for the PVC - ## - annotations: {} - ## @param readReplicas.persistence.labels Labels for the PVC - ## - labels: {} - ## @param readReplicas.persistence.selector Selector to match an existing Persistent Volume (this value is evaluated as a template) - ## selector: - ## matchLabels: - ## app: my-app - ## - selector: {} - ## @param readReplicas.persistence.dataSource Custom PVC data source - ## - dataSource: {} - ## PostgreSQL Read only Persistent Volume Claim Retention Policy - ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention - ## - persistentVolumeClaimRetentionPolicy: - ## @param 
readReplicas.persistentVolumeClaimRetentionPolicy.enabled Enable Persistent volume retention policy for read only Statefulset - ## - enabled: false - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenScaled Volume retention behavior when the replica count of the StatefulSet is reduced - ## - whenScaled: Retain - ## @param readReplicas.persistentVolumeClaimRetentionPolicy.whenDeleted Volume retention behavior that applies when the StatefulSet is deleted - ## - whenDeleted: Retain -## @section Backup parameters -## This section implements a trivial logical dump cronjob of the database. -## This only comes with the consistency guarantees of the dump program. -## This is not a snapshot based roll forward/backward recovery backup. -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ -backup: - ## @param backup.enabled Enable the logical dump of the database "regularly" - enabled: false - cronjob: - ## @param backup.cronjob.schedule Set the cronjob parameter schedule - schedule: "@daily" - ## @param backup.cronjob.timeZone Set the cronjob parameter timeZone - timeZone: "" - ## @param backup.cronjob.concurrencyPolicy Set the cronjob parameter concurrencyPolicy - concurrencyPolicy: Allow - ## @param backup.cronjob.failedJobsHistoryLimit Set the cronjob parameter failedJobsHistoryLimit - failedJobsHistoryLimit: 1 - ## @param backup.cronjob.successfulJobsHistoryLimit Set the cronjob parameter successfulJobsHistoryLimit - successfulJobsHistoryLimit: 3 - ## @param backup.cronjob.startingDeadlineSeconds Set the cronjob parameter startingDeadlineSeconds - startingDeadlineSeconds: "" - ## @param backup.cronjob.ttlSecondsAfterFinished Set the cronjob parameter ttlSecondsAfterFinished - ttlSecondsAfterFinished: "" - ## @param backup.cronjob.restartPolicy Set the cronjob parameter restartPolicy - restartPolicy: OnFailure - ## @param backup.cronjob.podSecurityContext.enabled Enable PodSecurityContext for CronJob/Backup - ## @param 
backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the CronJob - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## backup container's Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context - ## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged - ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - 
capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## @param backup.cronjob.command Set backup container's command to run - command: - - /bin/bash - - -c - - PGPASSWORD="${PGPASSWORD:-$(< "$PGPASSWORD_FILE")}" pg_dumpall --clean --if-exists --load-via-partition-root --quote-all-identifiers --no-password --file="${PGDUMP_DIR}/pg_dumpall-$(date '+%Y-%m-%d-%H-%M').pgdump" - ## @param backup.cronjob.labels Set the cronjob labels - labels: {} - ## @param backup.cronjob.annotations Set the cronjob annotations - annotations: {} - ## @param backup.cronjob.nodeSelector Node labels for PostgreSQL backup CronJob pod assignment - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/ - ## - nodeSelector: {} - ## @param backup.cronjob.tolerations Tolerations for PostgreSQL backup CronJob pods assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## backup cronjob container resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if backup.cronjob.resources is set (backup.cronjob.resources is recommended for production). 
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory - ## Example: - resources: {} - ## resources: - ## requests: - ## cpu: 1 - ## memory: 512Mi - ## limits: - ## cpu: 2 - ## memory: 1024Mi - networkPolicy: - ## @param backup.cronjob.networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: true - storage: - ## @param backup.cronjob.storage.enabled Enable using a `PersistentVolumeClaim` as backup data volume - ## - enabled: true - ## @param backup.cronjob.storage.existingClaim Provide an existing `PersistentVolumeClaim` (only when `architecture=standalone`) - ## If defined, PVC must be created manually before volume will be bound - ## - existingClaim: "" - ## @param backup.cronjob.storage.resourcePolicy Setting it to "keep" to avoid removing PVCs during a helm delete operation. Leaving it empty will delete PVCs after the chart deleted - ## - resourcePolicy: "" - ## @param backup.cronjob.storage.storageClass PVC Storage Class for the backup data volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. - ## - storageClass: "" - ## @param backup.cronjob.storage.accessModes PV Access Mode - ## - accessModes: - - ReadWriteOnce - ## @param backup.cronjob.storage.size PVC Storage Request for the backup data volume - ## - size: 8Gi - ## @param backup.cronjob.storage.annotations PVC annotations - ## - annotations: {} - ## @param backup.cronjob.storage.mountPath Path to mount the volume at - ## - mountPath: /backup/pgdump - ## @param backup.cronjob.storage.subPath Subdirectory of the volume to mount at - ## and one PV for multiple services. 
- ## - subPath: "" - ## Fine tuning for volumeClaimTemplates - ## - volumeClaimTemplates: - ## @param backup.cronjob.storage.volumeClaimTemplates.selector A label query over volumes to consider for binding (e.g. when using local volumes) - ## A label query over volumes to consider for binding (e.g. when using local volumes) - ## See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#labelselector-v1-meta for more details - ## - selector: {} - ## @param backup.cronjob.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the backup container - ## - extraVolumeMounts: [] - ## @param backup.cronjob.extraVolumes Optionally specify extra list of additional volumes for the backup container - ## - extraVolumes: [] + extraVolumes: [ ] -## @section Password update job -## -passwordUpdateJob: - ## @param passwordUpdateJob.enabled Enable password update job +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + limits: + cpu: 2 + memory: 8Gi + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
## enabled: false - ## @param passwordUpdateJob.backoffLimit set backoff limit of the job - ## - backoffLimit: 10 - ## @param passwordUpdateJob.command Override default container command on mysql Primary container(s) (useful when using custom images) - ## - command: [] - ## @param passwordUpdateJob.args Override default container args on mysql Primary container(s) (useful when using custom images) - ## - args: [] - ## @param passwordUpdateJob.extraCommands Extra commands to pass to the generation job - ## - extraCommands: "" - ## @param passwordUpdateJob.previousPasswords.postgresPassword Previous postgres password (set if the password secret was already changed) - ## @param passwordUpdateJob.previousPasswords.password Previous password (set if the password secret was already changed) - ## @param passwordUpdateJob.previousPasswords.replicationPassword Previous replication password (set if the password secret was already changed) - ## @param passwordUpdateJob.previousPasswords.existingSecret Name of a secret containing the previous passwords (set if the password secret was already changed) - previousPasswords: - postgresPassword: "" - password: "" - replicationPassword: "" - existingSecret: "" - ## Configure Container Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param passwordUpdateJob.containerSecurityContext.enabled Enabled containers' Security Context - ## @param passwordUpdateJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param passwordUpdateJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param passwordUpdateJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param passwordUpdateJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param passwordUpdateJob.containerSecurityContext.privileged 
Set container's Security Context privileged - ## @param passwordUpdateJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param passwordUpdateJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param passwordUpdateJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param passwordUpdateJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile - ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## Configure Pods Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod - ## @param passwordUpdateJob.podSecurityContext.enabled Enabled credential init job pods' Security Context - ## @param passwordUpdateJob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param passwordUpdateJob.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param passwordUpdateJob.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param passwordUpdateJob.podSecurityContext.fsGroup Set credential init job pod's Security Context fsGroup - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## @param passwordUpdateJob.extraEnvVars Array containing extra env vars to configure the credential init job - ## For example: - ## extraEnvVars: - ## - name: GF_DEFAULT_INSTANCE_NAME - ## value: my-instance - ## - extraEnvVars: [] - ## @param passwordUpdateJob.extraEnvVarsCM ConfigMap containing extra env vars to configure the credential init job - 
## - extraEnvVarsCM: "" - ## @param passwordUpdateJob.extraEnvVarsSecret Secret containing extra env vars to configure the credential init job (in case of sensitive data) - ## - extraEnvVarsSecret: "" - ## @param passwordUpdateJob.extraVolumes Optionally specify extra list of additional volumes for the credential init job - ## - extraVolumes: [] - ## @param passwordUpdateJob.extraVolumeMounts Array of extra volume mounts to be added to the jwt Container (evaluated as template). Normally used with `extraVolumes`. - ## - extraVolumeMounts: [] - ## @param passwordUpdateJob.initContainers Add additional init containers for the mysql Primary pod(s) - ## - initContainers: [] - ## Container resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param passwordUpdateJob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if passwordUpdateJob.resources is set (passwordUpdateJob.resources is recommended for production). 
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "micro" - ## @param passwordUpdateJob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## @param passwordUpdateJob.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param passwordUpdateJob.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param passwordUpdateJob.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param passwordUpdateJob.automountServiceAccountToken Mount Service Account token in pod - ## - automountServiceAccountToken: false - ## @param passwordUpdateJob.hostAliases Add deployment host aliases - ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ - ## - hostAliases: [] - ## @param passwordUpdateJob.annotations [object] Add annotations to the job - ## - annotations: {} - ## @param passwordUpdateJob.podLabels Additional pod labels - ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - ## - podLabels: {} - ## @param passwordUpdateJob.podAnnotations Additional pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). 
## - podAnnotations: {} + allowExternal: true -## @section Volume Permissions parameters -## + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} -## Init containers parameters: -## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node -## -volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume - ## - enabled: false - ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry - ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository - ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) - ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets - ## - image: - registry: docker.io - repository: bitnamilegacy/os-shell - tag: 12-debian-12-r51 - digest: "" - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Init container' Security Context - ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser - ## and not the below volumePermissions.containerSecurityContext.runAsUser - ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container - ## @param volumePermissions.containerSecurityContext.runAsGroup Group ID for the init container - ## @param volumePermissions.containerSecurityContext.runAsNonRoot runAsNonRoot for the init container - ## @param volumePermissions.containerSecurityContext.seccompProfile.type seccompProfile.type for the init container - ## - containerSecurityContext: - seLinuxOptions: {} - runAsUser: 0 - runAsGroup: 0 - runAsNonRoot: false - seccompProfile: - type: RuntimeDefault -## @section Other Parameters -## +## Configure extra 
options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 -## @param serviceBindings.enabled Create secret for service binding (Experimental) -## Ref: https://servicebinding.io/service-provider/ -## -serviceBindings: - enabled: false -## Service account for PostgreSQL to use. -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## -serviceAccount: - ## @param serviceAccount.create Enable creation of ServiceAccount for PostgreSQL pod - ## - create: false - ## @param serviceAccount.name The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the common.names.fullname template - ## - name: "" - ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created - ## Can be set to false if pods using this serviceAccount do not need to use K8s API - ## - automountServiceAccountToken: false - ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount - ## - annotations: {} -## Creates role for ServiceAccount -## @param rbac.create Create Role and RoleBinding (required for PSP to work) -## -rbac: - create: false - ## @param rbac.rules Custom RBAC rules to set - ## e.g: - ## rules: - ## - apiGroups: - ## - "" - ## resources: - ## - pods - ## verbs: - ## - get - ## - list - ## - rules: [] -## Pod Security Policy -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -## @param psp.create Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later -## -psp: - create: false -## @section Metrics Parameters +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Configure metrics exporter ## metrics: - ## @param metrics.enabled Start a prometheus exporter - ## enabled: false - ## @param metrics.image.registry [default: REGISTRY_NAME] PostgreSQL Prometheus Exporter image registry - ## @param metrics.image.repository [default: REPOSITORY_NAME/postgres-exporter] PostgreSQL Prometheus Exporter image repository - ## @skip metrics.image.tag PostgreSQL Prometheus Exporter image tag (immutable tags are recommended) - ## @param metrics.image.digest PostgreSQL image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param metrics.image.pullPolicy PostgreSQL Prometheus Exporter image pull policy - ## @param metrics.image.pullSecrets Specify image pull secrets - ## + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: { } + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: { } + namespace: "" + rules: [ ] + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # - alert: HugeReplicationLag + # expr: pg_replication_lag{service="{{ template "postgresql.fullname" . 
}}-metrics"} / 3600 > 1 + # for: 1m + # labels: + # severity: critical + # annotations: + # description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + # summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). image: registry: docker.io - #repository: bitnami/postgres-exporter - #tag: 0.17.1-debian-12-r16 - digest: "" + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r4 pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName ## - pullSecrets: [] - ## @param metrics.collectors Control enabled collectors - ## ref: https://github.com/prometheus-community/postgres_exporter#flags - ## Example: - ## collectors: - ## wal: false - collectors: {} - ## @param metrics.customMetrics Define additional custom metrics - ## ref: https://github.com/prometheus-community/postgres_exporter#adding-new-metrics-via-a-config-file-deprecated - ## customMetrics: - ## pg_database: - ## query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" - ## metrics: - ## - name: - ## usage: "LABEL" - ## description: "Name of the database" - ## - size_bytes: - ## usage: "GAUGE" - ## description: "Size of the database in bytes" - ## - customMetrics: {} - ## @param metrics.extraEnvVars Extra environment variables to add to PostgreSQL Prometheus exporter - ## see: https://github.com/prometheus-community/postgres_exporter#environment-variables - ## For example: - ## extraEnvVars: - ## - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS - ## value: "true" - ## - extraEnvVars: [] - ## 
PostgreSQL Prometheus exporter containers' Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container - ## @param metrics.containerSecurityContext.enabled Enabled containers' Security Context - ## @param metrics.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param metrics.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param metrics.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param metrics.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param metrics.containerSecurityContext.privileged Set container's Security Context privileged - ## @param metrics.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param metrics.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param metrics.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param metrics.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## Pod Security Context + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ ## - containerSecurityContext: - enabled: true - seLinuxOptions: {} + securityContext: + enabled: false runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true - privileged: false - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - ## Configure extra options for PostgreSQL Prometheus exporter containers' liveness, readiness and startup probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes - ## @param metrics.livenessProbe.enabled Enable livenessProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe - ## @param metrics.livenessProbe.periodSeconds Period seconds for livenessProbe - ## @param metrics.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe - ## @param metrics.livenessProbe.failureThreshold Failure threshold for livenessProbe - ## @param metrics.livenessProbe.successThreshold Success threshold for livenessProbe - ## + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes livenessProbe: enabled: true initialDelaySeconds: 5 @@ -1810,13 +465,7 @@ metrics: timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 - ## @param metrics.readinessProbe.enabled Enable readinessProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe - ## @param metrics.readinessProbe.periodSeconds Period seconds for readinessProbe - ## @param metrics.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe - ## @param metrics.readinessProbe.failureThreshold Failure threshold for readinessProbe - ## @param 
metrics.readinessProbe.successThreshold Success threshold for readinessProbe - ## + readinessProbe: enabled: true initialDelaySeconds: 5 @@ -1824,131 +473,3 @@ metrics: timeoutSeconds: 5 failureThreshold: 6 successThreshold: 1 - ## @param metrics.startupProbe.enabled Enable startupProbe on PostgreSQL Prometheus exporter containers - ## @param metrics.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe - ## @param metrics.startupProbe.periodSeconds Period seconds for startupProbe - ## @param metrics.startupProbe.timeoutSeconds Timeout seconds for startupProbe - ## @param metrics.startupProbe.failureThreshold Failure threshold for startupProbe - ## @param metrics.startupProbe.successThreshold Success threshold for startupProbe - ## - startupProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 15 - successThreshold: 1 - ## @param metrics.customLivenessProbe Custom livenessProbe that overrides the default one - ## - customLivenessProbe: {} - ## @param metrics.customReadinessProbe Custom readinessProbe that overrides the default one - ## - customReadinessProbe: {} - ## @param metrics.customStartupProbe Custom startupProbe that overrides the default one - ## - customStartupProbe: {} - ## @param metrics.containerPorts.metrics PostgreSQL Prometheus exporter metrics container port - ## - containerPorts: - metrics: 9187 - ## PostgreSQL Prometheus exporter resource requests and limits - ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - ## @param metrics.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if metrics.resources is set (metrics.resources is recommended for production). 
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "nano" - ## @param metrics.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Service configuration - ## - service: - ## @param metrics.service.ports.metrics PostgreSQL Prometheus Exporter service port - ## - ports: - metrics: 9187 - ## @param metrics.service.clusterIP Static clusterIP or None for headless services - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address - ## - clusterIP: "" - ## @param metrics.service.sessionAffinity Control where client requests go, to the same pod or round-robin - ## Values: ClientIP or None - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ - ## - sessionAffinity: None - ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.ports.metrics }}" - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) - ## - namespace: "" - ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - interval: "" - ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus - ## - labels: {} - ## @param metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## - selector: {} - ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## - relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## - honorLabels: false - ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
- ## - jobLabel: "" - ## Custom PrometheusRule to be defined - ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## - prometheusRule: - ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator - ## - enabled: false - ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) - ## - namespace: "" - ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus - ## - labels: {} - ## @param metrics.prometheusRule.rules PrometheusRule definitions - ## Make sure to constraint the rules to the current postgresql service. - ## rules: - ## - alert: HugeReplicationLag - ## expr: pg_replication_lag{service="{{ printf "%s-metrics" (include "postgresql.v1.chart.fullname" .) }}"} / 3600 > 1 - ## for: 1m - ## labels: - ## severity: critical - ## annotations: - ## description: replication for {{ include "postgresql.v1.chart.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). - ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). 
- ## - rules: [] diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf index af12e3e..361908d 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq.tf @@ -6,10 +6,11 @@ data "kubernetes_secret" "rabbitmq_core" { } resource "helm_release" "rabbitmq" { name = "rabbitmq" - repository = "oci://registry-1.docker.io/bitnamicharts" - chart = "rabbitmq" + #repository = "oci://registry-1.docker.io/bitnamicharts" + #chart = "rabbitmq" + chart = "${path.module}/charts/rabbitmq" namespace = "core" - version = "16.0.14" + #version = "16.0.14" values = [ file("${path.module}/rabbitmq_values/rabbitmq.yaml") From 5c29e8d7d9bff9f9171bd3f231446adc6dbc7343 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 24 Feb 2026 11:26:03 -0500 Subject: [PATCH 43/47] Fixed RabbitMQ. --- .../rabbitmq_values/rabbitmq.yaml | 105 +++++++++--------- 1 file changed, 53 insertions(+), 52 deletions(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml index 876d615..85e2470 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml @@ -21,7 +21,7 @@ image: ## set to true if you would like to see extra information on logs ## it turns BASH and NAMI debugging in minideb ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging - debug: false + debug: true ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' @@ -66,7 +66,7 @@ rbac: podManagementPolicy: OrderedReady ## section of specific values for rabbitmq -auth: +rabbitmq: ## RabbitMQ application username ## ref: 
https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables ## @@ -89,60 +89,60 @@ auth: ## # rabbitmqClusterNodeName: -## Value for the RABBITMQ_LOGS environment variable -## ref: https://www.rabbitmq.com/logging.html#log-file-location -## -logs: '-' + ## Value for the RABBITMQ_LOGS environment variable + ## ref: https://www.rabbitmq.com/logging.html#log-file-location + ## + logs: '-' -## RabbitMQ Max File Descriptors -## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables -## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits -## -# setUlimitNofiles: true # No longer used it seems -ulimitNofiles: '65536' + ## RabbitMQ Max File Descriptors + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits + ## + # setUlimitNofiles: true # No longer used it seems + ulimitNofiles: '65536' -## RabbitMQ maximum available scheduler threads and online scheduler threads -## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads -## -maxAvailableSchedulers: 2 -onlineSchedulers: 1 + ## RabbitMQ maximum available scheduler threads and online scheduler threads + ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads + ## + maxAvailableSchedulers: 2 + onlineSchedulers: 1 -## Plugins to enable -plugins: "rabbitmq_management" + ## Plugins to enable + plugins: "rabbitmq_management" -## Extra plugins to enable -## Use this instead of `plugins` to add new plugins -# extraPlugins: "rabbitmq_auth_backend_ldap" + ## Extra plugins to enable + ## Use this instead of `plugins` to add new plugins + # extraPlugins: "rabbitmq_auth_backend_ldap" -## Clustering settings -clustering: - address_type: hostname - #k8s_domain: cluster.local # Apparently no longer used 
- ## Rebalance master for queues in cluster when new replica is created - ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance - rebalance: false + ## Clustering settings + clustering: + address_type: hostname + #k8s_domain: cluster.local # Apparently no longer used + ## Rebalance master for queues in cluster when new replica is created + ## ref: https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + rebalance: false -loadDefinition: - enabled: false - existingSecret: load-definition - -## environment variables to configure rabbitmq -## ref: https://www.rabbitmq.com/configure.html#customise-environment -env: { } - -## Configuration file content: required cluster configuration -## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead -configuration: |- - # queue master locator - queue_master_locator=min-masters - # enable guest user - loopback_users.guest = false - -## Configuration file content: extra configuration -## Use this instead of `configuration` to add more configuration -extraConfiguration: |- - #disk_free_limit.absolute = 50MB - #management.load_definitions = /app/load_definition.json + loadDefinition: + enabled: false + existingSecret: load-definition + + ## environment variables to configure rabbitmq + ## ref: https://www.rabbitmq.com/configure.html#customise-environment + env: { } + + ## Configuration file content: required cluster configuration + ## Do not override unless you know what you are doing. 
To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead + configuration: |- + # queue master locator + queue_master_locator=min-masters + # enable guest user + loopback_users.guest = false + + ## Configuration file content: extra configuration + ## Use this instead of `configuration` to add more configuration + extraConfiguration: |- + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json ## Configuration file content: advanced configuration ## Use this as additional configuraton in classic config format (Erlang term configuration format) @@ -415,13 +415,14 @@ metrics: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: - enabled: false + enabled: true ## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an ## unknown order. ## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot ## -forceBoot: false +forceBoot: + enabled: false ## Optionally specify extra secrets to be created by the chart. ## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. From 3611a45ce056dc6f404f191a050d3d4ce47df3b1 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Tue, 24 Feb 2026 11:30:08 -0500 Subject: [PATCH 44/47] Adds vended RabbitMQ chart. 
--- .../k8s_data_layer/charts/rabbitmq/Chart.yaml | 17 + .../k8s_data_layer/charts/rabbitmq/README.md | 421 +++++++++++++ .../ci/affinity-toleration-values.yaml | 14 + .../charts/rabbitmq/ci/default-values.yaml | 1 + .../rabbitmq/ci/networkpolicy-values.yaml | 11 + .../charts/rabbitmq/templates/NOTES.txt | 79 +++ .../charts/rabbitmq/templates/_helpers.tpl | 258 ++++++++ .../charts/rabbitmq/templates/certs.yaml | 19 + .../rabbitmq/templates/configuration.yaml | 41 ++ .../rabbitmq/templates/healthchecks.yaml | 32 + .../charts/rabbitmq/templates/ingress.yaml | 41 ++ .../rabbitmq/templates/networkpolicy.yaml | 39 ++ .../charts/rabbitmq/templates/pdb.yaml | 17 + .../rabbitmq/templates/prometheusrule.yaml | 23 + .../charts/rabbitmq/templates/role.yaml | 15 + .../rabbitmq/templates/rolebinding.yaml | 18 + .../charts/rabbitmq/templates/secrets.yaml | 38 ++ .../rabbitmq/templates/serviceaccount.yaml | 11 + .../rabbitmq/templates/servicemonitor.yaml | 36 ++ .../rabbitmq/templates/statefulset.yaml | 372 ++++++++++++ .../rabbitmq/templates/svc-headless.yaml | 32 + .../charts/rabbitmq/templates/svc.yaml | 78 +++ .../charts/rabbitmq/values-production.yaml | 574 ++++++++++++++++++ .../charts/rabbitmq/values.schema.json | 100 +++ .../charts/rabbitmq/values.yaml | 555 +++++++++++++++++ 25 files changed, 2842 insertions(+) create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/Chart.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/README.md create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/affinity-toleration-values.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/default-values.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/networkpolicy-values.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/NOTES.txt create mode 100644 
terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/_helpers.tpl create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/certs.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/configuration.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/healthchecks.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/ingress.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/networkpolicy.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/pdb.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/prometheusrule.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/role.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/rolebinding.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/secrets.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/serviceaccount.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/servicemonitor.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/statefulset.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc-headless.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values-production.yaml create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.schema.json 
create mode 100644 terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.yaml diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/Chart.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/Chart.yaml new file mode 100644 index 0000000..e12666f --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/Chart.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +name: rabbitmq +version: 6.18.2 +appVersion: 3.8.2 +description: Open source message broker software that implements the Advanced Message Queuing Protocol (AMQP) +keywords: +- rabbitmq +- message queue +- AMQP +home: https://www.rabbitmq.com +icon: https://bitnami.com/assets/stacks/rabbitmq/img/rabbitmq-stack-220x234.png +sources: +- https://github.com/bitnami/bitnami-docker-rabbitmq +maintainers: +- name: Bitnami + email: containers@bitnami.com +engine: gotpl diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/README.md b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/README.md new file mode 100644 index 0000000..49af4b7 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/README.md @@ -0,0 +1,421 @@ +# RabbitMQ + +[RabbitMQ](https://www.rabbitmq.com/) is an open source message broker software that implements the Advanced Message Queuing Protocol (AMQP). + +## TL;DR; + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +## Introduction + +This chart bootstraps a [RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.11+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/rabbitmq +``` + +The command deploys RabbitMQ on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RabbitMQ chart and their default values. + +| Parameter | Description | Default | +| -------------------------------------------- | ------------------------------------------------ | ------------------------------------------------------- | +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | Rabbitmq Image registry | `docker.io` | +| `image.repository` | Rabbitmq Image name | `bitnami/rabbitmq` | +| `image.tag` | Rabbitmq Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `image.debug` | Specify if debug values should be set | `false` | +| `nameOverride` | String to partially override rabbitmq.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override rabbitmq.fullname template with a string | `nil` | +| 
`rbacEnabled` | Specify if rbac is enabled in your cluster | `true` | +| `podManagementPolicy` | Pod management policy | `OrderedReady` | +| `rabbitmq.username` | RabbitMQ application username | `user` | +| `rabbitmq.password` | RabbitMQ application password | _random 10 character long alphanumeric string_ | +| `rabbitmq.existingPasswordSecret` | Existing secret with RabbitMQ credentials | `nil` | +| `rabbitmq.erlangCookie` | Erlang cookie | _random 32 character long alphanumeric string_ | +| `rabbitmq.existingErlangSecret` | Existing secret with RabbitMQ Erlang cookie | `nil` | +| `rabbitmq.plugins` | List of plugins to enable | `rabbitmq_management rabbitmq_peer_discovery_k8s` | +| `rabbitmq.extraPlugins` | Extra plugings to enable | `nil` | +| `rabbitmq.clustering.address_type` | Switch clustering mode | `ip` or `hostname` | +| `rabbitmq.clustering.k8s_domain` | Customize internal k8s cluster domain | `cluster.local` | +| `rabbitmq.clustering.rebalance` | Rebalance master for queues in cluster when new replica is created | `false` | +| `rabbitmq.logs` | Value for the RABBITMQ_LOGS environment variable | `-` | +| `rabbitmq.setUlimitNofiles` | Specify if max file descriptor limit should be set | `true` | +| `rabbitmq.ulimitNofiles` | Max File Descriptor limit | `65536` | +| `rabbitmq.maxAvailableSchedulers` | RabbitMQ maximum available scheduler threads | `2` | +| `rabbitmq.onlineSchedulers` | RabbitMQ online scheduler threads | `1` | +| `rabbitmq.env` | RabbitMQ [environment variables](https://www.rabbitmq.com/configure.html#customise-environment) | `{}` | +| `rabbitmq.configuration` | Required cluster configuration | See values.yaml | +| `rabbitmq.extraConfiguration` | Extra configuration to add to rabbitmq.conf | See values.yaml | +| `rabbitmq.advancedConfiguration` | Extra configuration (in classic format) to add to advanced.config | See values.yaml | +| `rabbitmq.tls.enabled` | Enable TLS support to rabbitmq | `false` | +| `rabbitmq.tls.failIfNoPeerCert` | 
When set to true, TLS connection will be rejected if client fails to provide a certificate | `true` | +| `rabbitmq.tls.sslOptionsVerify` | `verify_peer` | Should [peer verification](https://www.rabbitmq.com/ssl.html#peer-verification) be enabled? | +| `rabbitmq.tls.caCertificate` | Ca certificate | Certificate Authority (CA) bundle content | +| `rabbitmq.tls.serverCertificate` | Server certificate | Server certificate content | +| `rabbitmq.tls.serverKey` | Server Key | Server private key content | +| `rabbitmq.tls.existingSecret` | Existing secret with certificate content to rabbitmq credentials | `nil` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.server` | LDAP server | `""` | +| `ldap.port` | LDAP port | `389` | +| `ldap.user_dn_pattern` | DN used to bind to LDAP | `cn=${username},dc=example,dc=org` | +| `ldap.tls.enabled` | Enable TLS for LDAP connections | `false` (if set to true, check advancedConfiguration parameter in values.yml) | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | Amqp port | `5672` | +| `service.loadBalancerIP` | LoadBalancerIP for the service | `nil` | +| `service.tlsPort` | Amqp TLS port | `5671` | +| `service.distPort` | Erlang distribution server port | `25672` | +| `service.nodePort` | Node port override, if serviceType NodePort | _random available between 30000-32767_ | +| `service.nodeTlsPort` | Node port override, if serviceType NodePort | _random available between 30000-32767_ | +| `service.managerPort` | RabbitMQ Manager port | `15672` | +| `service.extraPorts` | Extra ports to expose in the service | `nil` | +| `service.extraContainerPorts` | Extra ports to be included in container spec, primarily informational | `nil` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `service.annotations` | service annotations | {} | +| `schedulerName` | Name of the k8s service (other than default) | `nil` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` 
(uses alpha storage class annotation) | +| `persistence.existingClaim` | RabbitMQ data Persistent Volume existing claim name, evaluated as a template | "" | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.path` | Mount path of the data volume | `/opt/bitnami/rabbitmq/var/lib/rabbitmq` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `resources` | resource needs and limits to apply to the pod | {} | +| `replicas` | Replica count | `1` | +| `priorityClassName` | Pod priority class name | `` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.additionalRules` | Additional NetworkPolicy rules | `nil` | +| `nodeSelector` | Node labels for pod assignment | {} | +| `affinity` | Affinity settings for pod assignment | {} | +| `tolerations` | Toleration labels for pod assignment | [] | +| `updateStrategy` | Statefulset update strategy policy | `RollingUpdate` | +| `ingress.enabled` | Enable ingress resource for Management console | `false` | +| `ingress.hostName` | Hostname to your RabbitMQ installation | `nil` | +| `ingress.path` | Path within the url structure | `/` | +| `ingress.tls` | enable ingress with tls | `false` | +| `ingress.tlsSecret` | tls type secret to be used | `myTlsSecret` | +| `ingress.annotations` | ingress annotations as an array | [] | +| `livenessProbe.enabled` | would you like a livenessProbed to be enabled | `true` | +| `livenessProbe.initialDelaySeconds` | number of seconds | 120 | +| `livenessProbe.timeoutSeconds` | number of seconds | 20 | +| `livenessProbe.periodSeconds` | number of seconds | 30 | +| `livenessProbe.failureThreshold` | number of failures | 6 | +| 
`livenessProbe.successThreshold` | number of successes | 1 | +| `podDisruptionBudget` | Pod Disruption Budget settings | {} | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | number of seconds | 10 | +| `readinessProbe.timeoutSeconds` | number of seconds | 20 | +| `readinessProbe.periodSeconds` | number of seconds | 30 | +| `readinessProbe.failureThreshold` | number of failures | 3 | +| `readinessProbe.successThreshold` | number of successes | 1 | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | Exporter image registry | `docker.io` | +| `metrics.image.repository` | Exporter image name | `bitnami/rabbitmq-exporter` | +| `metrics.image.tag` | Exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Exporter image pull policy | `IfNotPresent` | +| `metrics.livenessProbe.enabled` | would you like a livenessProbed to be enabled | `true` | +| `metrics.livenessProbe.initialDelaySeconds` | number of seconds | 15 | +| `metrics.livenessProbe.timeoutSeconds` | number of seconds | 5 | +| `metrics.livenessProbe.periodSeconds` | number of seconds | 30 | +| `metrics.livenessProbe.failureThreshold` | number of failures | 6 | +| `metrics.livenessProbe.successThreshold` | number of successes | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | number of seconds | 5 | +| `metrics.readinessProbe.timeoutSeconds` | number of seconds | 5 | +| `metrics.readinessProbe.periodSeconds` | number of seconds | 30 | +| `metrics.readinessProbe.failureThreshold` | number of failures | 3 | +| `metrics.readinessProbe.successThreshold` | number of successes | 1 | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using PrometheusOperator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace where servicemonitor 
resource should be created | `nil` | +| `metrics.serviceMonitor.interval` | Specify the interval at which metrics should be scraped | `30s` | +| `metrics.serviceMonitor.scrapeTimeout` | Specify the timeout after which the scrape is ended | `nil` | +| `metrics.serviceMonitor.relabellings` | Specify Metric Relabellings to add to the scrape endpoint | `nil` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels. | `false` | +| `metrics.serviceMonitor.additionalLabels` | Used to pass Labels that are required by the Installed Prometheus Operator | `{}` | +| `metrics.serviceMonitor.release` | Used to pass Labels release that sometimes should be custom for Prometheus Operator | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as rabbitmq | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.port` | Prometheus metrics exporter port | `9419` | +| `metrics.env` | Exporter [configuration environment variables](https://github.com/kbudde/rabbitmq_exporter#configuration) | `{}` | +| `metrics.resources` | Exporter resource requests/limit | `nil` | +| `metrics.capabilities` | Exporter: Comma-separated list of extended [scraping capabilities supported by the target RabbitMQ server](https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities) | `bert,no_sort` | +| `podLabels` | Additional labels for the statefulset pod(s). 
| {} | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | +| `forceBoot.enabled` | Executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an unknown order. Use it only if you prefer availability over integrity. | `false` | +| `extraSecrets` | Optionally specify extra secrets to be created by the chart. | `{}` | + +The above parameters map to the env variables defined in [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq). For more information please refer to the [bitnami/rabbitmq](http://github.com/bitnami/bitnami-docker-rabbitmq) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set rabbitmq.username=admin,rabbitmq.password=secretpassword,rabbitmq.erlangCookie=secretcookie \ + bitnami/rabbitmq +``` + +The above command sets the RabbitMQ admin username and password to `admin` and `secretpassword` respectively. Additionally the secure erlang cookie is set to `secretcookie`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
For example, + +```bash +$ helm install my-release -f values.yaml bitnami/rabbitmq +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Resource needs and limits to apply to the pod: +```diff +- resources: {} ++ resources: ++ requests: ++ memory: 256Mi ++ cpu: 100m +``` + +- Replica count: +```diff +- replicas: 1 ++ replicas: 3 +``` + +- Node labels for pod assignment: +```diff +- nodeSelector: {} ++ nodeSelector: ++ beta.kubernetes.io/arch: amd64 +``` + +- Enable ingress with TLS: +```diff +- ingress.tls: false ++ ingress.tls: true +``` + +- Start a side-car prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +- Enable init container that changes volume permissions in the data directory: +```diff +- volumePermissions.enabled: false ++ volumePermissions.enabled: true +``` + +To horizontally scale this chart once it has been deployed you have two options: + +- Use `kubectl scale` command + +- Upgrading the chart with the following parameters: + +```console +replicas=3 +rabbitmq.password="$RABBITMQ_PASSWORD" +rabbitmq.erlangCookie="$RABBITMQ_ERLANG_COOKIE" +``` + +> Note: please note it's mandatory to indicate the password and erlangCookie 
that were set the first time the chart was installed to upgrade the chart. Otherwise, new pods won't be able to join the cluster. + +### Load Definitions +It is possible to [load a RabbitMQ definitions file to configure RabbitMQ](http://www.rabbitmq.com/management.html#load-definitions). Because definitions may contain RabbitMQ credentials, [store the JSON as a Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). Within the secret's data, choose a key name that corresponds with the desired load definitions filename (i.e. `load_definition.json`) and use the JSON object as the value. For example: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: rabbitmq-load-definition +type: Opaque +stringData: + load_definition.json: |- + { + "vhosts": [ + { + "name": "/" + } + ] + } +``` + +Then, specify the `management.load_definitions` property as an `extraConfiguration` pointing to the load definition file path within the container (i.e. `/app/load_definition.json`) and set `loadDefinition.enabled` to `true`. + +Any load definitions specified will be available within the container at `/app`. + +> Loading a definition will take precedence over any configuration done through [Helm values](#parameters). + +If needed, you can use `extraSecrets` to let the chart create the secret for you. This way, you don't need to manually create it before deploying a release. For example: + +```yaml +extraSecrets: + load-definition: + load_definition.json: | + { + "vhosts": [ + { + "name": "/" + } + ] + } +rabbitmq: + loadDefinition: + enabled: true + secretName: load-definition + extraConfiguration: | + management.load_definitions = /app/load_definition.json +``` + +### Enabling TLS support + +To enable TLS support you must generate the certificates using RabbitMQ [documentation](https://www.rabbitmq.com/ssl.html#automated-certificate-generation). 
 + +You must include in your values.yaml the caCertificate, serverCertificate and serverKey files. + +```yaml + caCertificate: |- + -----BEGIN CERTIFICATE----- + MIIDRTCCAi2gAwIBAgIJAJPh+paO6a3cMA0GCSqGSIb3DQEBCwUAMDExIDAeBgNV + ... + -----END CERTIFICATE----- + serverCertificate: |- + -----BEGIN CERTIFICATE----- + MIIDqjCCApKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAxMSAwHgYDVQQDDBdUTFNH + ... + -----END CERTIFICATE----- + serverKey: |- + -----BEGIN RSA PRIVATE KEY----- + MIIEpAIBAAKCAQEA2iX3M4d3LHrRAoVUbeFZN3EaGzKhyBsz7GWwTgETiNj+AL7p + .... + -----END RSA PRIVATE KEY----- +``` + +This will generate a secret with the certs, but it is possible to specify an existing secret using `existingSecret: name-of-existing-secret-to-rabbitmq`. The secret is of type `kubernetes.io/tls`. + +Disabling [failIfNoPeerCert](https://www.rabbitmq.com/ssl.html#peer-verification-configuration) allows a TLS connection if the client fails to provide a certificate. + +[sslOptionsVerify](https://www.rabbitmq.com/ssl.html#peer-verification-configuration): When the sslOptionsVerify option is set to verify_peer and the client sends a certificate, the node must perform peer verification. When set to verify_none, peer verification will be disabled and certificate exchange won't be performed. + +### LDAP + +LDAP support can be enabled in the chart by specifying the `ldap.` parameters while creating a release. The following parameters should be configured to properly enable the LDAP support in the chart. + +- `ldap.enabled`: Enable LDAP support. Defaults to `false`. +- `ldap.server`: LDAP server host. No defaults. +- `ldap.port`: LDAP server port. `389`. +- `ldap.user_dn_pattern`: DN used to bind to LDAP. `cn=${username},dc=example,dc=org`. +- `ldap.tls.enabled`: Enable TLS for LDAP connections. Defaults to `false`. 
+ +For example: + +```console +ldap.enabled="true" +ldap.server="my-ldap-server" +ldap.port="389" +ldap.user_dn_pattern="cn=${username},dc=example,dc=org" +``` + +If `ldap.tls.enabled` is set to true, consider using `ldap.port=636` and checking the settings in the advancedConfiguration. + +### Common issues + +- Changing the password through RabbitMQ's UI can make the pod fail due to the default liveness probes. If you do so, remember to make the chart aware of the new password. Updating the default secret with the password you set through RabbitMQ's UI will automatically recreate the pods. If you are using your own secret, you may have to manually recreate the pods. + +## Persistence + +The [Bitnami RabbitMQ](https://github.com/bitnami/bitnami-docker-rabbitmq) image stores the RabbitMQ data and configurations at the `/opt/bitnami/rabbitmq/var/lib/rabbitmq/` path of the container. + +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. By default, the volume is created using dynamic volume provisioning. An existing PersistentVolumeClaim can also be defined. + +### Existing PersistentVolumeClaims + +1. Create the PersistentVolume +1. Create the PersistentVolumeClaim +1. Install the chart + +```bash +$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/rabbitmq +``` + +### Adjust permissions of the persistence volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an `initContainer` to change the ownership of the volume before mounting it in the final destination. 
+ +You can enable this `initContainer` by setting `volumePermissions.enabled` to `true`. + +## Upgrading + +### To 6.0.0 + +This new version updates the RabbitMQ image to a [new version based on bash instead of node.js](https://github.com/bitnami/bitnami-docker-rabbitmq#3715-r18-3715-ol-7-r19). However, since this Chart overwrites the container's command, the changes to the container shouldn't affect the Chart. To upgrade, it may be needed to enable the `fastBoot` option, as it is already the case from upgrading from 5.X to 5.Y. + +### To 5.0.0 + +This major release changes the clustering method from `ip` to `hostname`. +This change is needed to fix the persistence. The data dir will now depend on the hostname which is stable instead of the pod IP that might change. + +> IMPORTANT: Note that if you upgrade from a previous version you will lose your data. + +### To 3.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 3.0.0. 
The following example assumes that the release name is rabbitmq: + +```console +$ kubectl delete statefulset rabbitmq --cascade=false +``` diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/affinity-toleration-values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/affinity-toleration-values.yaml new file mode 100644 index 0000000..6be0ee1 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/affinity-toleration-values.yaml @@ -0,0 +1,14 @@ +tolerations: + - key: foo + operator: "Equal" + value: bar +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: foo + operator: In + values: + - bar diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/default-values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/networkpolicy-values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/networkpolicy-values.yaml new file mode 100644 index 0000000..67ef8d1 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/ci/networkpolicy-values.yaml @@ -0,0 +1,11 @@ +networkPolicy: + enable: true + allowExternal: false + additionalRules: + - matchLabels: + - role: foo + - matchExpressions: + - key: role + operator: In + values: + - bar diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/NOTES.txt b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/NOTES.txt new file mode 100644 index 0000000..3d9faaf --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/NOTES.txt @@ -0,0 +1,79 @@ + +** Please be patient while the chart is being deployed ** + +Credentials: + + Username : {{ .Values.rabbitmq.username }} + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . }} -o jsonpath="{.data.rabbitmq-password}" | base64 --decode)" + echo "ErLang Cookie : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . }} -o jsonpath="{.data.rabbitmq-erlang-cookie}" | base64 --decode)" + +RabbitMQ can be accessed within the cluster on port {{ .Values.service.nodePort }} at {{ template "rabbitmq.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.rabbitmq.clustering.k8s_domain }} + +To access for outside the cluster, perform the following steps: + +{{- if contains "NodePort" .Values.service.type }} + +Obtain the NodePort IP and ports: + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT_AMQP=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[1].nodePort}" services {{ template "rabbitmq.fullname" . }}) + export NODE_PORT_STATS=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[3].nodePort}" services {{ template "rabbitmq.fullname" . }}) + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$NODE_IP:$NODE_PORT_AMQP/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$NODE_IP:$NODE_PORT_STATS/" + +{{- else if contains "LoadBalancer" .Values.service.type }} + +Obtain the LoadBalancer IP: + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "rabbitmq.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + +To Access the RabbitMQ AMQP port: + + echo "URL : amqp://$SERVICE_IP:{{ .Values.service.port }}/" + +To Access the RabbitMQ Management interface: + + echo "URL : http://$SERVICE_IP:{{ .Values.service.managerPort }}/" + +{{- else if contains "ClusterIP" .Values.service.type }} + +To Access the RabbitMQ AMQP port: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "rabbitmq.fullname" . 
}} {{ .Values.service.port }}:{{ .Values.service.port }} + echo "URL : amqp://127.0.0.1:{{ .Values.service.port }}/" + +To Access the RabbitMQ Management interface: + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "rabbitmq.fullname" . }} {{ .Values.service.managerPort }}:{{ .Values.service.managerPort }} + echo "URL : http://127.0.0.1:{{ .Values.service.managerPort }}/" + +{{- end }} + +{{- if .Values.metrics.enabled }} + +To access the RabbitMQ Prometheus metrics, get the RabbitMQ Prometheus exporter URL by running: + + echo "Prometheus Metrics URL: http://127.0.0.1:{{ .Values.metrics.port }}/metrics" + kubectl port-forward --namespace {{ .Release.Namespace }} {{ template "rabbitmq.fullname" . }}-0 {{ .Values.metrics.port }}:{{ .Values.metrics.port }} + +Then, open the URL obtained in a browser. + +{{- end }} + +{{- include "rabbitmq.validateValues" . -}} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/_helpers.tpl b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/_helpers.tpl new file mode 100644 index 0000000..23b3edb --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/_helpers.tpl @@ -0,0 +1,258 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "rabbitmq.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "rabbitmq.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "rabbitmq.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ plugin list +*/}} +{{- define "rabbitmq.plugins" -}} +{{- $plugins := .Values.rabbitmq.plugins | replace " " ", " -}} +{{- if .Values.rabbitmq.extraPlugins -}} +{{- $extraPlugins := .Values.rabbitmq.extraPlugins | replace " " ", " -}} +{{- printf "[%s, %s]." $plugins $extraPlugins | indent 4 -}} +{{- else -}} +{{- printf "[%s]." $plugins | indent 4 -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper RabbitMQ image name +*/}} +{{- define "rabbitmq.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper metrics image name +*/}} +{{- define "rabbitmq.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "rabbitmq.secretPasswordName" -}} + {{- if .Values.rabbitmq.existingPasswordSecret -}} + {{- printf "%s" .Values.rabbitmq.existingPasswordSecret -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) -}} + {{- end -}} +{{- end -}} + +{{/* +Get the erlang secret. +*/}} +{{- define "rabbitmq.secretErlangName" -}} + {{- if .Values.rabbitmq.existingErlangSecret -}} + {{- printf "%s" .Values.rabbitmq.existingErlangSecret -}} + {{- else -}} + {{- printf "%s" (include "rabbitmq.fullname" .) 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "rabbitmq.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "rabbitmq.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "rabbitmq.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "rabbitmq.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "rabbitmq.validateValues.ldap" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of rabbitmq - LDAP support +*/}} +{{- define "rabbitmq.validateValues.ldap" -}} +{{- if .Values.ldap.enabled }} +{{- if not (and .Values.ldap.server .Values.ldap.port .Values.ldap.user_dn_pattern) }} +rabbitmq: LDAP + Invalid LDAP configuration. When enabling LDAP support, the parameters "ldap.server", + "ldap.port", and "ldap.user_dn_pattern" are mandatory. Please provide them: + + $ helm install {{ .Release.Name }} bitnami/rabbitmq \ + --set ldap.enabled=true \ + --set ldap.server="my-ldap-server" \ + --set ldap.port="389" \ + --set ldap.user_dn_pattern="cn=${username},dc=example,dc=org" +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "rabbitmq.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "rabbitmq.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/certs.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/certs.yaml new file mode 100644 index 0000000..1aa999b --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/certs.yaml @@ -0,0 +1,19 @@ +{{- if and (not .Values.rabbitmq.tls.existingSecret) ( .Values.rabbitmq.tls.enabled) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "rabbitmq.fullname" . }}-certs + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: kubernetes.io/tls +data: + ca.crt: + {{ required "A valid .Values.rabbitmq.tls.caCertificate entry required!" .Values.rabbitmq.tls.caCertificate | b64enc | quote }} + tls.crt: + {{ required "A valid .Values.rabbitmq.tls.serverCertificate entry required!" .Values.rabbitmq.tls.serverCertificate| b64enc | quote }} + tls.key: + {{ required "A valid .Values.rabbitmq.tls.serverKey entry required!" .Values.rabbitmq.tls.serverKey | b64enc | quote }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/configuration.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/configuration.yaml new file mode 100644 index 0000000..acf71bb --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/configuration.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-config + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + enabled_plugins: |- +{{ template "rabbitmq.plugins" . 
}} + rabbitmq.conf: |- + ##username and password + default_user={{.Values.rabbitmq.username}} + default_pass=CHANGEME +{{ .Values.rabbitmq.configuration | indent 4 }} +{{ .Values.rabbitmq.extraConfiguration | indent 4 }} +{{- if .Values.rabbitmq.tls.enabled }} + ssl_options.verify={{ .Values.rabbitmq.tls.sslOptionsVerify }} + listeners.ssl.default={{ .Values.service.tlsPort }} + ssl_options.fail_if_no_peer_cert={{ .Values.rabbitmq.tls.failIfNoPeerCert }} + ssl_options.cacertfile = /opt/bitnami/rabbitmq/certs/ca_certificate.pem + ssl_options.certfile = /opt/bitnami/rabbitmq/certs/server_certificate.pem + ssl_options.keyfile = /opt/bitnami/rabbitmq/certs/server_key.pem +{{- end }} +{{- if .Values.ldap.enabled }} + auth_backends.1 = rabbit_auth_backend_ldap + auth_backends.2 = internal + auth_ldap.servers.1 = {{ .Values.ldap.server }} + auth_ldap.port = {{ .Values.ldap.port }} + auth_ldap.user_dn_pattern = {{ .Values.ldap.user_dn_pattern }} +{{- if .Values.ldap.tls.enabled }} + auth_ldap.use_ssl = true +{{- end }} +{{- end }} + +{{ if .Values.rabbitmq.advancedConfiguration}} + advanced.config: |- +{{ .Values.rabbitmq.advancedConfiguration | indent 4 }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/healthchecks.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/healthchecks.yaml new file mode 100644 index 0000000..8b5ed46 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/healthchecks.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "rabbitmq.fullname" . }}-healthchecks + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +data: + rabbitmq-health-check: |- + #!/bin/sh + START_FLAG=/opt/bitnami/rabbitmq/var/lib/rabbitmq/.start + if [ -f ${START_FLAG} ]; then + rabbitmqctl node_health_check + RESULT=$? + if [ $RESULT -ne 0 ]; then + rabbitmqctl status + exit $? + fi + rm -f ${START_FLAG} + exit ${RESULT} + fi + rabbitmq-api-check $1 $2 + rabbitmq-api-check: |- + #!/bin/sh + set -e + URL=$1 + EXPECTED=$2 + ACTUAL=$(curl --silent --show-error --fail "${URL}") + echo "${ACTUAL}" + test "${EXPECTED}" = "${ACTUAL}" \ No newline at end of file diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/ingress.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/ingress.yaml new file mode 100644 index 0000000..4fa890d --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/ingress.yaml @@ -0,0 +1,41 @@ +{{- if .Values.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: "{{ template "rabbitmq.fullname" . }}" + labels: + app: "{{ template "rabbitmq.name" . }}" + chart: "{{ template "rabbitmq.chart" . }}" + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} + annotations: + {{- if .Values.ingress.tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- if .Values.ingress.hostName }} + - host: {{ .Values.ingress.hostName }} + http: + {{- else }} + - http: + {{- end }} + paths: + - path: {{ .Values.ingress.path }} + backend: + serviceName: {{ template "rabbitmq.fullname" . 
}} + servicePort: {{ .Values.service.managerPort }} +{{- if .Values.ingress.tls }} + tls: + - hosts: + {{- if .Values.ingress.hostName }} + - {{ .Values.ingress.hostName }} + secretName: {{ .Values.ingress.tlsSecret }} + {{- else}} + - secretName: {{ .Values.ingress.tlsSecret }} + {{- end }} +{{- end }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/networkpolicy.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/networkpolicy.yaml new file mode 100644 index 0000000..6d0dc5a --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/networkpolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +spec: + podSelector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: {{ .Release.Name | quote }} + ingress: + # Allow inbound connections + + - ports: + - port: 4369 # EPMD + - port: {{ .Values.service.port }} + - port: {{ .Values.service.tlsPort }} + - port: {{ .Values.service.distPort }} + - port: {{ .Values.service.managerPort }} + + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "rabbitmq.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.additionalRules }} +{{ toYaml . 
| indent 8 }} + {{- end }} + {{- end }} + + # Allow prometheus scrapes + - ports: + - port: {{ .Values.metrics.port }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/pdb.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/pdb.yaml new file mode 100644 index 0000000..0e5f544 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/pdb.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + selector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/prometheusrule.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/prometheusrule.yaml new file mode 100644 index 0000000..15f05e9 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "rabbitmq.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "rabbitmq.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/role.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/role.yaml new file mode 100644 index 0000000..f4bea31 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/role.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbacEnabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/rolebinding.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/rolebinding.yaml new file mode 100644 index 0000000..bf315b5 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbacEnabled }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "rabbitmq.fullname" . }}-endpoint-reader + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +subjects: +- kind: ServiceAccount + name: {{ template "rabbitmq.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "rabbitmq.fullname" . 
}}-endpoint-reader +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/secrets.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/secrets.yaml new file mode 100644 index 0000000..619215f --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/secrets.yaml @@ -0,0 +1,38 @@ +{{- if or (not .Values.rabbitmq.existingErlangSecret) (not .Values.rabbitmq.existingPasswordSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + {{ if not .Values.rabbitmq.existingPasswordSecret }}{{ if .Values.rabbitmq.password }} + rabbitmq-password: {{ .Values.rabbitmq.password | b64enc | quote }} + {{ else }} + rabbitmq-password: {{ randAlphaNum 10 | b64enc | quote }} + {{ end }}{{ end }} + {{ if not .Values.rabbitmq.existingErlangSecret }}{{ if .Values.rabbitmq.erlangCookie }} + rabbitmq-erlang-cookie: {{ .Values.rabbitmq.erlangCookie | b64enc | quote }} + {{ else }} + rabbitmq-erlang-cookie: {{ randAlphaNum 32 | b64enc | quote }} + {{ end }}{{ end }} +{{- end }} +{{- range $key, $value := .Values.extraSecrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ $key }} + labels: + app: {{ template "rabbitmq.name" $ }} + chart: {{ template "rabbitmq.chart" $ }} + release: "{{ $.Release.Name }}" + heritage: "{{ $.Release.Service }}" +type: Opaque +stringData: +{{ $value | toYaml | nindent 2 }} +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/serviceaccount.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/serviceaccount.yaml new file mode 100644 index 0000000..b4ac6aa --- /dev/null +++ 
b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if .Values.rbacEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/servicemonitor.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/servicemonitor.yaml new file mode 100644 index 0000000..0b556f6 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/servicemonitor.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "rabbitmq.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + heritage: "{{ .Release.Service }}" + release: {{ if .Values.metrics.serviceMonitor.release }}"{{ .Values.metrics.serviceMonitor.release }}"{{ else }}"{{ .Release.Name }}"{{ end }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} +{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- if .Values.metrics.serviceMonitor.relabellings }} + metricRelabelings: +{{ toYaml .Values.metrics.serviceMonitor.relabellings | indent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" +{{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/statefulset.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/statefulset.yaml new file mode 100644 index 0000000..08d4364 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/statefulset.yaml @@ -0,0 +1,372 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + serviceName: {{ template "rabbitmq.fullname" . }}-headless + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicas }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + app: {{ template "rabbitmq.name" . 
}} + release: "{{ .Release.Name }}" + template: + metadata: + labels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" + chart: {{ template "rabbitmq.chart" . }} + {{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} + {{- end }} + annotations: + {{- if or (not .Values.rabbitmq.existingErlangSecret) (not .Values.rabbitmq.existingPasswordSecret) }} + checksum/secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "rabbitmq.imagePullSecrets" . | indent 6 }} + {{- if .Values.rbacEnabled}} + serviceAccountName: {{ template "rabbitmq.fullname" . }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "rabbitmq.tplValue" (dict "value" .Values.affinity "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: 10 + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled .Values.securityContext.enabled }} + initContainers: + - name: volume-permissions + image: "{{ template "rabbitmq.volumePermissions.image" . 
}}" + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: ["/bin/chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.path }}"] + securityContext: + runAsUser: 0 + resources: +{{ toYaml .Values.volumePermissions.resources | indent 10 }} + volumeMounts: + - name: data + mountPath: "{{ .Values.persistence.path }}" + {{- end }} + containers: + - name: rabbitmq + image: {{ template "rabbitmq.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: + - bash + - -ec + - | + mkdir -p /opt/bitnami/rabbitmq/.rabbitmq/ + mkdir -p /opt/bitnami/rabbitmq/etc/rabbitmq/ + touch /opt/bitnami/rabbitmq/var/lib/rabbitmq/.start + #persist the erlang cookie in both places for server and cli tools + echo $RABBITMQ_ERL_COOKIE > /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie + cp /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/.rabbitmq/ + #change permission so only the user has access to the cookie file + chmod 600 /opt/bitnami/rabbitmq/.rabbitmq/.erlang.cookie /opt/bitnami/rabbitmq/var/lib/rabbitmq/.erlang.cookie + #copy the mounted configuration to both places + cp /opt/bitnami/rabbitmq/conf/* /opt/bitnami/rabbitmq/etc/rabbitmq + # Apply resources limits + {{- if .Values.rabbitmq.setUlimitNofiles }} + ulimit -n "${RABBITMQ_ULIMIT_NOFILES}" + {{- end }} + #replace the default password that is generated + sed -i "/CHANGEME/cdefault_pass=${RABBITMQ_PASSWORD//\\/\\\\}" /opt/bitnami/rabbitmq/etc/rabbitmq/rabbitmq.conf + {{- if and .Values.persistence.enabled .Values.forceBoot.enabled }} + if [ -d "{{ .Values.persistence.path }}/mnesia/${RABBITMQ_NODENAME}" ]; then rabbitmqctl force_boot; fi + {{- end }} + exec rabbitmq-server + {{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /opt/bitnami/rabbitmq/conf + - name: healthchecks 
+ mountPath: /usr/local/sbin/rabbitmq-api-check + subPath: rabbitmq-api-check + - name: healthchecks + mountPath: /usr/local/sbin/rabbitmq-health-check + subPath: rabbitmq-health-check + {{- if .Values.rabbitmq.tls.enabled }} + - name: {{ template "rabbitmq.fullname" . }}-certs + mountPath: /opt/bitnami/rabbitmq/certs + {{- end }} + - name: data + mountPath: "{{ .Values.persistence.path }}" + {{- if .Values.rabbitmq.loadDefinition.enabled }} + - name: load-definition-volume + mountPath: /app + readOnly: true + {{- end }} + ports: + - name: epmd + containerPort: 4369 + - name: amqp + containerPort: {{ .Values.service.port }} + {{- if .Values.rabbitmq.tls.enabled }} + - name: amqp-ssl + containerPort: {{ .Values.service.tlsPort }} + {{- end }} + - name: dist + containerPort: {{ .Values.service.distPort }} + - name: stats + containerPort: {{ .Values.service.managerPort }} +{{- if .Values.service.extraContainerPorts }} +{{ toYaml .Values.service.extraContainerPorts | indent 8 }} +{{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - sh + - -c + - rabbitmq-api-check "http://{{ .Values.rabbitmq.username }}:$RABBITMQ_PASSWORD@127.0.0.1:{{ .Values.service.managerPort }}/api/healthchecks/node" '{"status":"ok"}' + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - sh + - -c + - rabbitmq-health-check "http://{{ .Values.rabbitmq.username }}:$RABBITMQ_PASSWORD@127.0.0.1:{{ .Values.service.managerPort }}/api/healthchecks/node" '{"status":"ok"}' + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + 
periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + {{- end }} + {{- if and (gt (.Values.replicas | int) 1) ( eq .Values.rabbitmq.clustering.rebalance true) }} + lifecycle: + postStart: + exec: + command: + - /bin/sh + - -c + - until rabbitmqctl cluster_status >/dev/null; do echo Waiting for + cluster readiness...; sleep 5 ; done; rabbitmq-queues rebalance "all" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: K8S_SERVICE_NAME + value: "{{ template "rabbitmq.fullname" . }}-headless" + - name: K8S_ADDRESS_TYPE + value: {{ .Values.rabbitmq.clustering.address_type }} + {{- if (eq "hostname" .Values.rabbitmq.clustering.address_type) }} + - name: RABBITMQ_NODENAME + value: "rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.rabbitmq.clustering.k8s_domain }}" + - name: K8S_HOSTNAME_SUFFIX + value: ".$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.{{ .Values.rabbitmq.clustering.k8s_domain }}" + {{- else }} + - name: RABBITMQ_NODENAME + {{- if .Values.rabbitmq.rabbitmqClusterNodeName }} + value: {{ .Values.rabbitmq.rabbitmqClusterNodeName | quote }} + {{- else }} + value: "rabbit@$(MY_POD_NAME)" + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: RABBITMQ_LDAP_ENABLE + value: "yes" + - name: RABBITMQ_LDAP_TLS + value: {{ ternary "yes" "no" .Values.ldap.tls.enabled | quote }} + - name: RABBITMQ_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: RABBITMQ_LDAP_SERVER_PORT + value: {{ .Values.ldap.port | quote }} + - name: RABBITMQ_LDAP_USER_DN_PATTERN + value: {{ .Values.ldap.user_dn_pattern }} + {{- 
end }} + - name: RABBITMQ_LOGS + value: {{ .Values.rabbitmq.logs | quote }} + - name: RABBITMQ_ULIMIT_NOFILES + value: {{ .Values.rabbitmq.ulimitNofiles | quote }} + {{- if and .Values.rabbitmq.maxAvailableSchedulers }} + - name: RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS + value: {{ printf "+S %s:%s" (toString .Values.rabbitmq.maxAvailableSchedulers) (toString .Values.rabbitmq.onlineSchedulers) -}} + {{- end }} + - name: RABBITMQ_USE_LONGNAME + value: "true" + - name: RABBITMQ_ERL_COOKIE + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretErlangName" . }} + key: rabbitmq-erlang-cookie + - name: RABBITMQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . }} + key: rabbitmq-password + {{- range $key, $value := .Values.rabbitmq.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "rabbitmq.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + env: + - name: RABBIT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "rabbitmq.secretPasswordName" . 
}} + key: rabbitmq-password + - name: RABBIT_URL + value: "http://{{ .Values.metrics.rabbitmqAddress }}:{{ .Values.service.managerPort }}" + - name: RABBIT_USER + value: {{ .Values.rabbitmq.username }} + - name: PUBLISH_PORT + value: "{{ .Values.metrics.port }}" + {{ if .Values.metrics.capabilities }} + - name: RABBIT_CAPABILITIES + value: "{{ .Values.metrics.capabilities }}" + {{- end }} + {{- range $key, $value := .Values.metrics.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + {{- if .Values.metrics.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + {{- end }} + {{- if .Values.metrics.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /metrics + port: metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + {{- end }} + resources: +{{ toYaml .Values.metrics.resources | indent 10 }} +{{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- if .Values.securityContext.extra }} + {{- toYaml .Values.securityContext.extra | nindent 8 }} + {{- end }} + {{- end }} + volumes: + {{- if .Values.rabbitmq.tls.enabled }} + - name: {{ template 
"rabbitmq.fullname" . }}-certs + secret: + secretName: {{ if .Values.rabbitmq.tls.existingSecret }}{{ .Values.rabbitmq.tls.existingSecret }}{{- else }}{{ template "rabbitmq.fullname" . }}-certs{{- end }} + items: + - key: ca.crt + path: ca_certificate.pem + - key: tls.crt + path: server_certificate.pem + - key: tls.key + path: server_key.pem + {{- end }} + - name: config-volume + configMap: + name: {{ template "rabbitmq.fullname" . }}-config + items: + - key: rabbitmq.conf + path: rabbitmq.conf + {{- if .Values.rabbitmq.advancedConfiguration}} + - key: advanced.config + path: advanced.config + {{- end }} + - key: enabled_plugins + path: enabled_plugins + - name: healthchecks + configMap: + name: {{ template "rabbitmq.fullname" . }}-healthchecks + items: + - key: rabbitmq-health-check + path: rabbitmq-health-check + mode: 111 + - key: rabbitmq-api-check + path: rabbitmq-api-check + mode: 111 + {{- if .Values.rabbitmq.loadDefinition.enabled }} + - name: load-definition-volume + secret: + secretName: {{ .Values.rabbitmq.loadDefinition.secretName | quote }} + {{- end }} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + {{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} + {{- end }} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "rabbitmq.storageClass" . 
}} + {{- end }} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc-headless.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc-headless.yaml new file mode 100644 index 0000000..14ad08e --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc-headless.yaml @@ -0,0 +1,32 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "rabbitmq.fullname" . }}-headless + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +spec: + clusterIP: None + ports: + - name: epmd + port: 4369 + targetPort: epmd + - name: amqp + port: {{ .Values.service.port }} + targetPort: amqp +{{- if .Values.rabbitmq.tls.enabled }} + - name: amqp-tls + port: {{ .Values.service.tlsPort }} + targetPort: amqp-tls +{{- end }} + - name: dist + port: {{ .Values.service.distPort }} + targetPort: dist + - name: stats + port: {{ .Values.service.managerPort }} + targetPort: stats + selector: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc.yaml new file mode 100644 index 0000000..f811a32 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/templates/svc.yaml @@ -0,0 +1,78 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "rabbitmq.fullname" . }} + labels: + app: {{ template "rabbitmq.name" . }} + chart: {{ template "rabbitmq.chart" . 
}} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +{{- if or .Values.service.annotations .Values.metrics.enabled }} + annotations: +{{- end }} +{{- if .Values.service.annotations }} +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} +{{- if .Values.metrics.enabled }} +{{ toYaml .Values.metrics.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.service.type }} +{{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{ with .Values.service.loadBalancerSourceRanges }} +{{ toYaml . | indent 4 }} +{{- end }} +{{- end }} + {{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: epmd + port: 4369 + targetPort: epmd + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- end }} + - name: amqp + port: {{ .Values.service.port }} + targetPort: amqp + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- else if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- if .Values.rabbitmq.tls.enabled }} + - name: amqp-ssl + port: {{ .Values.service.tlsPort }} + targetPort: amqp-ssl + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodeTlsPort))) }} + nodePort: {{ .Values.service.nodeTlsPort }} + {{- end }} + {{- end }} + - name: dist + port: {{ .Values.service.distPort }} + targetPort: dist + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- end }} + - name: stats + port: {{ .Values.service.managerPort }} + targetPort: stats + {{- if (eq .Values.service.type "ClusterIP") }} + nodePort: null + {{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + port: {{ .Values.metrics.port }} + targetPort: metrics + {{- if (eq .Values.service.type "ClusterIP") 
}} + nodePort: null + {{- end }} +{{- end }} +{{- if .Values.service.extraPorts }} +{{ toYaml .Values.service.extraPorts | indent 2 }} +{{- end }} + selector: + app: {{ template "rabbitmq.name" . }} + release: "{{ .Release.Name }}" diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values-production.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values-production.yaml new file mode 100644 index 0000000..51b65d6 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values-production.yaml @@ -0,0 +1,574 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.8.2-debian-10-r41 + + ## set to true if you would like to see extra information on logs + ## it turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override rabbitmq.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override rabbitmq.fullname template +## +# fullnameOverride: + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## does your cluster have rbac enabled? assume yes by default +rbacEnabled: true + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. 
+## ref : https://www.rabbitmq.com/clustering.html#restarting +## +podManagementPolicy: OrderedReady + +## section of specific values for rabbitmq +rabbitmq: + ## RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # password: + # existingPasswordSecret: name-of-existing-secret + + ## Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # erlangCookie: + # existingErlangSecret: name-of-existing-secret + + ## Node name to cluster with. e.g.: `clusternode@hostname` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # rabbitmqClusterNodeName: + + ## Value for the RABBITMQ_LOGS environment variable + ## ref: https://www.rabbitmq.com/logging.html#log-file-location + ## + logs: '-' + + ## RabbitMQ Max File Descriptors + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits + ## + setUlimitNofiles: true + ulimitNofiles: '65536' + + ## RabbitMQ maximum available scheduler threads and online scheduler threads + ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads + ## + maxAvailableSchedulers: 2 + onlineSchedulers: 1 + + ## Plugins to enable + plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + + ## Extra plugins to enable + ## Use this instead of `plugins` to add new plugins + extraPlugins: "rabbitmq_auth_backend_ldap" + + ## Clustering settings + clustering: + address_type: hostname + k8s_domain: cluster.local + ## Rebalance master for queues in cluster when new replica is created + ## ref: 
https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + rebalance: false + + loadDefinition: + enabled: false + secretName: load-definition + + ## environment variables to configure rabbitmq + ## ref: https://www.rabbitmq.com/configure.html#customise-environment + env: {} + + ## Configuration file content: required cluster configuration + ## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead + configuration: |- + ## Clustering + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator=min-masters + # enable guest user + loopback_users.guest = false + + ## Configuration file content: extra configuration + ## Use this instead of `configuration` to add more configuration + extraConfiguration: |- + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json + + ## Configuration file content: advanced configuration + ## Use this as additional configuraton in classic config format (Erlang term configuration format) + ## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. + ## advancedConfiguration: |- + ## [{ + ## rabbitmq_auth_backend_ldap, + ## [{ + ## ssl_options, + ## [{ + ## verify, verify_none + ## }, { + ## fail_if_no_peer_cert, + ## false + ## }] + ## ]} + ## }]. 
+ ## + advancedConfiguration: |- + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## + tls: + enabled: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + # existingSecret: name-of-existing-secret-to-rabbitmq + +## LDAP configuration +## +ldap: + enabled: false + server: "" + port: "389" + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + # If you enabled TLS/SSL you can set advaced options using the advancedConfiguration parameter. + enabled: false + +## Kubernetes service type +service: + type: ClusterIP + ## Node port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # nodePort: 30672 + + ## Set the LoadBalancerIP + ## + # loadBalancerIP: + + ## Node port Tls + ## + # nodeTlsPort: 30671 + + ## Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## Amqp Tls port + ## + tlsPort: 5671 + + ## Dist port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPort: 15672 + + ## Service annotations + annotations: {} + # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + + ## Extra ports to expose + # extraPorts: + + ## Extra ports to be included in container spec, primarily informational + # extraContainerPorts: + +# Additional pod labels to apply +podLabels: {} + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + 
extra: {} + +persistence: + ## this enables PVC templates that will create one per pod + enabled: true + + ## rabbitmq data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + + ## Existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + # existingClaim: "" + + # If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well. + size: 8Gi + + # persistence directory, maps to the rabbitmq data directory + path: /opt/bitnami/rabbitmq/var/lib/rabbitmq + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 100m + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
+ ## + # additionalRules: + # - matchLabels: + # - role: frontend + # - matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +## Replica count, set to 3 to provide a default available cluster +replicas: 3 + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## updateStrategy for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Node labels and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +nodeSelector: + beta.kubernetes.io/arch: amd64 +tolerations: [] +affinity: {} + +## affinity: | +## podAntiAffinity: +## requiredDuringSchedulingIgnoredDuringExecution: +## - labelSelector: +## matchLabels: +## app: {{ template "rabbitmq.name" . }} +## release: {{ .Release.Name | quote }} +## topologyKey: kubernetes.io/hostname +## preferredDuringSchedulingIgnoredDuringExecution: +## - weight: 100 +## podAffinityTerm: +## labelSelector: +## matchLabels: +## app: {{ template "rabbitmq.name" . }} +## release: {{ .Release.Name | quote }} +## topologyKey: failure-domain.beta.kubernetes.io/zone + +## annotations for rabbitmq pods +podAnnotations: {} + +## Configure the podDisruptionBudget +podDisruptionBudget: {} +# maxUnavailable: 1 +# minAvailable: 1 + +## Configure the ingress resource that allows you to access the +## Wordpress installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## The list of hostnames to be covered with this ingress record. 
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + ## hostName: foo.bar.com + path: / + + ## Set this to true in order to enable TLS on the ingress record + ## A side effect of this will be that the backend wordpress service will be connected at port 443 + tls: true + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: myTlsSecret + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + +## The following settings are to configure the frequency of the lifeness and readiness probes +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +metrics: + enabled: true + image: + registry: docker.io + repository: bitnami/rabbitmq-exporter + tag: 0.29.0-debian-10-r38 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## environment variables to configure rabbitmq_exporter + ## ref: https://github.com/kbudde/rabbitmq_exporter#configuration + env: {} + ## Metrics exporter port + port: 9419 + ## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server + ## ref: https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities + capabilities: "bert,no_sort" + resources: {} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9419" + + livenessProbe: + enabled: true + initialDelaySeconds: 15 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + # namespace: "" + ## Specify the interval at which metrics should be scraped + interval: 30s + ## Specify the timeout after which the scrape is ended + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + honorLabels: false + ## Specify the release for ServiceMonitor. 
Sometimes it should be custom for prometheus operator to work + # release: "" + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## List of reules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## Please adapt them to your needs. + ## Make sure to constraint the rules to the current rabbitmq service. + ## Also make sure to escape what looks like helm template. + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + # description: RabbitMQ node down + + # - alert: ClusterDown + # expr: | + # sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + # < {{ .Values.replicas }} + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster + # VALUE = {{ "{{ $value }}" }} + + # - alert: ClusterPartition + # expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . 
}}"} > 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Cluster partition + # VALUE = {{ "{{ $value }}" }} + + # - alert: OutOfMemory + # expr: | + # rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + # / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"} + # * 100 > 90 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + # LABELS: {{ "{{ $labels }}" }} + + # - alert: TooManyConnections + # expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + # description: | + # RabbitMQ instance has too many connections (> 1000) + # VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: true + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an +## unknown order. +## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot +## +forceBoot: + enabled: false + +## Optionally specify extra secrets to be created by the chart. 
+## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## +extraSecrets: {} + # load-definition: + # load_definition.json: | + # { + # ... + # } diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.schema.json b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.schema.json new file mode 100644 index 0000000..038f577 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "rabbitmq": { + "type": "object", + "properties": { + "username": { + "type": "string", + "title": "RabbitMQ user", + "form": true + }, + "password": { + "type": "string", + "title": "RabbitMQ password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set" + }, + "extraConfiguration": { + "type": "string", + "title": "Extra RabbitMQ Configuration", + "form": true, + "render": "textArea", + "description": "Extra configuration to be appended to RabbitMQ Configuration" + } + } + }, + "replicas": { + "type": "integer", + "form": true, + "title": "Number of replicas", + "description": "Number of replicas to deploy" + }, + "persistence": { + "type": "object", + "title": "Persistence configuration", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "condition": false, + "value": "persistence.enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": 
"Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "condition": false, + "value": "metrics.enabled" + } + } + } + } + } + } + } +} diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.yaml new file mode 100644 index 0000000..d568df4 --- /dev/null +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/charts/rabbitmq/values.yaml @@ -0,0 +1,555 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami RabbitMQ image version +## ref: https://hub.docker.com/r/bitnami/rabbitmq/tags/ +## +image: + registry: docker.io + repository: bitnami/rabbitmq + tag: 3.8.2-debian-10-r41 + + ## set to true if you would like to see extra information on logs + ## it turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image 
tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override rabbitmq.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override rabbitmq.fullname template +## +# fullnameOverride: + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## does your cluster have rbac enabled? assume yes by default +rbacEnabled: true + +## RabbitMQ should be initialized one by one when building cluster for the first time. +## Therefore, the default value of podManagementPolicy is 'OrderedReady' +## Once the RabbitMQ participates in the cluster, it waits for a response from another +## RabbitMQ in the same cluster at reboot, except the last RabbitMQ of the same cluster. +## If the cluster exits gracefully, you do not need to change the podManagementPolicy +## because the first RabbitMQ of the statefulset always will be last of the cluster. +## However if the last RabbitMQ of the cluster is not the first RabbitMQ due to a failure, +## you must change podManagementPolicy to 'Parallel'. 
+## ref : https://www.rabbitmq.com/clustering.html#restarting +## +podManagementPolicy: OrderedReady + +## section of specific values for rabbitmq +rabbitmq: + ## RabbitMQ application username + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + username: user + + ## RabbitMQ application password + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # password: + # existingPasswordSecret: name-of-existing-secret + + ## Erlang cookie to determine whether different nodes are allowed to communicate with each other + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # erlangCookie: + # existingErlangSecret: name-of-existing-secret + + ## Node name to cluster with. e.g.: `clusternode@hostname` + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # rabbitmqClusterNodeName: + + ## Value for the RABBITMQ_LOGS environment variable + ## ref: https://www.rabbitmq.com/logging.html#log-file-location + ## + logs: '-' + + ## RabbitMQ Max File Descriptors + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## ref: https://www.rabbitmq.com/install-debian.html#kernel-resource-limits + ## + setUlimitNofiles: true + ulimitNofiles: '65536' + + ## RabbitMQ maximum available scheduler threads and online scheduler threads + ## ref: https://hamidreza-s.github.io/erlang/scheduling/real-time/preemptive/migration/2016/02/09/erlang-scheduler-details.html#scheduler-threads + ## + maxAvailableSchedulers: 2 + onlineSchedulers: 1 + + ## Plugins to enable + plugins: "rabbitmq_management rabbitmq_peer_discovery_k8s" + + ## Extra plugins to enable + ## Use this instead of `plugins` to add new plugins + extraPlugins: "rabbitmq_auth_backend_ldap" + + ## Clustering settings + clustering: + address_type: hostname + k8s_domain: cluster.local + ## Rebalance master for queues in cluster when new replica is created + ## ref: 
https://www.rabbitmq.com/rabbitmq-queues.8.html#rebalance + rebalance: false + + loadDefinition: + enabled: false + secretName: load-definition + + ## environment variables to configure rabbitmq + ## ref: https://www.rabbitmq.com/configure.html#customise-environment + env: {} + + ## Configuration file content: required cluster configuration + ## Do not override unless you know what you are doing. To add more configuration, use `extraConfiguration` of `advancedConfiguration` instead + configuration: |- + ## Clustering + cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s + cluster_formation.k8s.host = kubernetes.default.svc.cluster.local + cluster_formation.node_cleanup.interval = 10 + cluster_formation.node_cleanup.only_log_warning = true + cluster_partition_handling = autoheal + # queue master locator + queue_master_locator=min-masters + # enable guest user + loopback_users.guest = false + + ## Configuration file content: extra configuration + ## Use this instead of `configuration` to add more configuration + extraConfiguration: |- + #disk_free_limit.absolute = 50MB + #management.load_definitions = /app/load_definition.json + + ## Configuration file content: advanced configuration + ## Use this as additional configuraton in classic config format (Erlang term configuration format) + ## + ## If you set LDAP with TLS/SSL enabled and you are using self-signed certificates, uncomment these lines. + ## advancedConfiguration: |- + ## [{ + ## rabbitmq_auth_backend_ldap, + ## [{ + ## ssl_options, + ## [{ + ## verify, verify_none + ## }, { + ## fail_if_no_peer_cert, + ## false + ## }] + ## ]} + ## }]. 
+ ## + advancedConfiguration: |- + + ## Enable encryption to rabbitmq + ## ref: https://www.rabbitmq.com/ssl.html + ## + tls: + enabled: false + failIfNoPeerCert: true + sslOptionsVerify: verify_peer + caCertificate: |- + serverCertificate: |- + serverKey: |- + # existingSecret: name-of-existing-secret-to-rabbitmq + +## LDAP configuration +## +ldap: + enabled: false + server: "" + port: "389" + user_dn_pattern: cn=${username},dc=example,dc=org + tls: + # If you enabled TLS/SSL you can set advaced options using the advancedConfiguration parameter. + enabled: false + +## Kubernetes service type +service: + type: ClusterIP + ## Node port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + # nodePort: 30672 + + ## Set the LoadBalancerIP + ## + # loadBalancerIP: + + ## Node port Tls + ## + # nodeTlsPort: 30671 + + ## Amqp port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + port: 5672 + + ## Amqp Tls port + ## + tlsPort: 5671 + + ## Dist port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + distPort: 25672 + + ## RabbitMQ Manager port + ## ref: https://github.com/bitnami/bitnami-docker-rabbitmq#environment-variables + ## + managerPort: 15672 + + ## Service annotations + annotations: {} + # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0 + + ## Load Balancer sources + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + + ## Extra ports to expose + # extraPorts: + + ## Extra ports to be included in container spec, primarily informational + # extraContainerPorts: + +# Additional pod labels to apply +podLabels: {} + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + 
extra: {} + +persistence: + ## this enables PVC templates that will create one per pod + enabled: true + + ## rabbitmq data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + + ## Existing PersistentVolumeClaims + ## The value is evaluated as a template + ## So, for example, the name can depend on .Release or .Chart + # existingClaim: "" + + # If you change this value, you might have to adjust `rabbitmq.diskFreeLimit` as well. + size: 8Gi + + # persistence directory, maps to the rabbitmq data directory + path: /opt/bitnami/rabbitmq/var/lib/rabbitmq + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the ports RabbitMQ is listening + ## on. When true, RabbitMQ will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. 
+ ## + # additionalRules: + # - matchLabels: + # - role: frontend + # - matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +## Replica count, set to 1 to provide a default available cluster +replicas: 1 + +## Pod priority +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" + +## updateStrategy for RabbitMQ statefulset +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +updateStrategy: + type: RollingUpdate + +## Node labels and tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature +nodeSelector: {} +tolerations: [] +affinity: {} +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 1 +## annotations for rabbitmq pods +podAnnotations: {} + +## Configure the ingress resource that allows you to access the +## Wordpress installation. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## The list of hostnames to be covered with this ingress record. 
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + ## hostName: foo.bar.com + path: / + + ## Set this to true in order to enable TLS on the ingress record + ## A side effect of this will be that the backend wordpress service will be connected at port 443 + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: myTlsSecret + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + +## The following settings are to configure the frequency of the lifeness and readiness probes +livenessProbe: + enabled: true + initialDelaySeconds: 120 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 20 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + +metrics: + enabled: false + image: + registry: docker.io + repository: bitnami/rabbitmq-exporter + tag: 0.29.0-debian-10-r38 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## environment variables to configure rabbitmq_exporter + ## ref: https://github.com/kbudde/rabbitmq_exporter#configuration + env: {} + ## Metrics exporter port + port: 9419 + ## RabbitMQ address to connect to (from the same Pod, usually the local loopback address). + ## If your Kubernetes cluster does not support IPv6, you can change to `127.0.0.1` in order to force IPv4. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/#networking + rabbitmqAddress: localhost + ## Comma-separated list of extended scraping capabilities supported by the target RabbitMQ server + ## ref: https://github.com/kbudde/rabbitmq_exporter#extended-rabbitmq-capabilities + capabilities: "bert,no_sort" + resources: {} + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9419" + + livenessProbe: + enabled: true + initialDelaySeconds: 15 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + timeoutSeconds: 5 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + serviceMonitor: + ## If the operator is installed in your cluster, set to true to create a Service Monitor Entry + enabled: false + ## Specify the namespace in which the serviceMonitor resource will be created + # namespace: "" + ## Specify the interval at which metrics should be scraped + interval: 30s + ## Specify the timeout after which the scrape is ended + # scrapeTimeout: 30s + ## Specify Metric Relabellings to add to the scrape endpoint + # relabellings: + ## Specify honorLabels parameter to add the scrape endpoint + honorLabels: false + ## Specify the release 
for ServiceMonitor. Sometimes it should be custom for prometheus operator to work + # release: "" + ## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + additionalLabels: {} + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + rules: [] + ## List of reules, used as template by Helm. + ## These are just examples rules inspired from https://awesome-prometheus-alerts.grep.to/rules.html + ## Please adapt them to your needs. + ## Make sure to constraint the rules to the current rabbitmq service. + ## Also make sure to escape what looks like helm template. + # - alert: RabbitmqDown + # expr: rabbitmq_up{service="{{ template "rabbitmq.fullname" . }}"} == 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Rabbitmq down (instance {{ "{{ $labels.instance }}" }}) + # description: RabbitMQ node down + + # - alert: ClusterDown + # expr: | + # sum(rabbitmq_running{service="{{ template "rabbitmq.fullname" . }}"}) + # < {{ .Values.replicas }} + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster down (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Less than {{ .Values.replicas }} nodes running in RabbitMQ cluster + # VALUE = {{ "{{ $value }}" }} + + # - alert: ClusterPartition + # expr: rabbitmq_partitions{service="{{ template "rabbitmq.fullname" . 
}}"} > 0 + # for: 5m + # labels: + # severity: error + # annotations: + # summary: Cluster partition (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Cluster partition + # VALUE = {{ "{{ $value }}" }} + + # - alert: OutOfMemory + # expr: | + # rabbitmq_node_mem_used{service="{{ template "rabbitmq.fullname" . }}"} + # / rabbitmq_node_mem_limit{service="{{ template "rabbitmq.fullname" . }}"} + # * 100 > 90 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Out of memory (instance {{ "{{ $labels.instance }}" }}) + # description: | + # Memory available for RabbmitMQ is low (< 10%)\n VALUE = {{ "{{ $value }}" }} + # LABELS: {{ "{{ $labels }}" }} + + # - alert: TooManyConnections + # expr: rabbitmq_connectionsTotal{service="{{ template "rabbitmq.fullname" . }}"} > 1000 + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Too many connections (instance {{ "{{ $labels.instance }}" }}) + # description: | + # RabbitMQ instance has too many connections (> 1000) + # VALUE = {{ "{{ $value }}" }}\n LABELS: {{ "{{ $labels }}" }} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an +## unknown order. +## ref: https://www.rabbitmq.com/rabbitmqctl.8.html#force_boot +## +forceBoot: + enabled: false + +## Optionally specify extra secrets to be created by the chart. 
+## This can be useful when combined with load_definitions to automatically create the secret containing the definitions to be loaded. +## +extraSecrets: {} + # load-definition: + # load_definition.json: | + # { + # ... + # } From 85c417618d3d88e8efa300478741a7ca53e5df93 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 26 Feb 2026 08:19:27 -0500 Subject: [PATCH 45/47] Remove uneccessary volume permissions and add new role for ISRA. --- .../rabbitmq_values/rabbitmq.yaml | 2 +- terraform/modules/eks/main.tf | 49 +++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml index 85e2470..35ec47a 100644 --- a/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml +++ b/terraform-k8s-infrastructure/modules/k8s_data_layer/rabbitmq_values/rabbitmq.yaml @@ -415,7 +415,7 @@ metrics: ## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup ## volumePermissions: - enabled: true + enabled: false ## forceBoot: executes 'rabbitmqctl force_boot' to force boot cluster shut down unexpectedly in an ## unknown order. 
diff --git a/terraform/modules/eks/main.tf b/terraform/modules/eks/main.tf index 8e0f701..9d757e9 100644 --- a/terraform/modules/eks/main.tf +++ b/terraform/modules/eks/main.tf @@ -305,4 +305,53 @@ resource "aws_eks_access_policy_association" "gha_policy" { depends_on = [ aws_eks_access_entry.gha_role ] +} + +# Default pod Service Account role and policy +resource "aws_iam_role" "default_irsa" { + name = "default-irsa-${var.environment}" + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Federated = aws_iam_openid_connect_provider.example.arn + } + Action = "sts:AssumeRoleWithWebIdentity" + Condition = { + StringEquals = { + "${replace(aws_eks_cluster.eks_cluster.identity[0].oidc[0].issuer, "https://", "")}:sub" = "system:serviceaccount:core:default" + } + } + } + ] + }) +} + +resource "aws_iam_policy" "default_sa_policy" { + name = "default-sa-policy-${var.environment}" + description = "" + + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogStreams" + ] + Resource = "arn:aws:logs:us-east-1:*:log-group:*" + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "authorization_attach" { + role = aws_iam_role.default_irsa.name + policy_arn = aws_iam_policy.default_irsa_policy.arn } \ No newline at end of file From 2c01853a47a83d98c3cbada3f415a04f1cde974f Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 26 Feb 2026 11:40:32 -0500 Subject: [PATCH 46/47] Fix resource names. 
--- terraform/modules/eks/main.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/terraform/modules/eks/main.tf b/terraform/modules/eks/main.tf index 9d757e9..695a615 100644 --- a/terraform/modules/eks/main.tf +++ b/terraform/modules/eks/main.tf @@ -308,7 +308,7 @@ resource "aws_eks_access_policy_association" "gha_policy" { } # Default pod Service Account role and policy -resource "aws_iam_role" "default_irsa" { +resource "aws_iam_role" "default_sa_role" { name = "default-irsa-${var.environment}" assume_role_policy = jsonencode({ @@ -352,6 +352,6 @@ resource "aws_iam_policy" "default_sa_policy" { } resource "aws_iam_role_policy_attachment" "authorization_attach" { - role = aws_iam_role.default_irsa.name - policy_arn = aws_iam_policy.default_irsa_policy.arn + role = aws_iam_role.default_sa_role.name + policy_arn = aws_iam_policy.default_sa_policy.arn } \ No newline at end of file From c994c2b17b14519852f458de77fe875bff4a4a75 Mon Sep 17 00:00:00 2001 From: Tim Anderegg Date: Thu, 26 Feb 2026 16:44:45 -0500 Subject: [PATCH 47/47] Adds GH OIDC docs and updates staging TF vars. --- README.md | 3 +++ terraform/.terraform.lock.hcl | 5 +++++ terraform/vars/terraform-staging.tfvars | 2 ++ 3 files changed, 10 insertions(+) diff --git a/README.md b/README.md index 67d7ada..cdbfe0e 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,9 @@ For a description of the setup, see the infrastructure [section](https://resource-watch.github.io/doc-api/developer.html#infrastructure-configuration) of the developer documentation. +# Github Actions +Github Actions (GHA) has been setup to run `terraform plan` when a PR is opened to either the `dev`, `staging`, or `production` (TODO) branches, and `terraform apply` when the PR is merged. This makes use of an OIDC role as described here: https://docs.github.com/en/actions/how-tos/secure-your-work/security-harden-deployments/oidc-in-aws. 
The role for each environment was created manually, and is specified using the `gha_role_arn` Terraform variable. + ## Setting up the AWS resources To setup the cluster cloud resources, use the following command: diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl index 12d5d43..86f27bb 100644 --- a/terraform/.terraform.lock.hcl +++ b/terraform/.terraform.lock.hcl @@ -28,6 +28,7 @@ provider "registry.terraform.io/hashicorp/archive" { version = "2.7.1" hashes = [ "h1:62VrkalDPMKB9zerCBS4iKTbvxejwnAWn/XXYZZQWD4=", + "h1:Tr6LvLbm30zX4BRNPHhXo8SnOP0vg5UKAeunRNfnas8=", "zh:19881bb356a4a656a865f48aee70c0b8a03c35951b7799b6113883f67f196e8e", "zh:2fcfbf6318dd514863268b09bbe19bfc958339c636bcbcc3664b45f2b8bf5cc6", "zh:3323ab9a504ce0a115c28e64d0739369fe85151291a2ce480d51ccbb0c381ac5", @@ -47,6 +48,7 @@ provider "registry.terraform.io/hashicorp/aws" { version = "6.16.0" constraints = ">= 4.48.0" hashes = [ + "h1:5k5PC0+AvCr/8XS7sujVYDApF3GybTVcjh6HMhB8pIc=", "h1:eBjQq1U3AZ+mkEgE6cC8z6Qw4DIV23tNmM8tCcuqXuk=", "zh:11b5c11fc47aa7537d3acfb3156c9206ce465c2c0db4478914d1ba9493a27f38", "zh:1de5c4ef8096ab6a4fe8e528c5a1d772a57de74ef4de98996071987d0d6a7696", @@ -70,6 +72,7 @@ provider "registry.terraform.io/hashicorp/random" { version = "3.7.2" hashes = [ "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", + "h1:hkKSY5xI4R1H4Yrg10HHbtOoxZif2dXa9HFPSbaVg5o=", "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", @@ -108,6 +111,7 @@ provider "registry.terraform.io/hashicorp/tls" { version = "4.1.0" hashes = [ "h1:Ka8mEwRFXBabR33iN/WTIEW6RP0z13vFsDlwn11Pf2I=", + "h1:uDtqTpFJOseNUlPDx4TT/lXf6ie3CarsimL7sYCiVH4=", "zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2", "zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8", 
"zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc", @@ -127,6 +131,7 @@ provider "registry.terraform.io/surveymonkey/sparkpost" { version = "0.2.12" constraints = "~> 0.2.7" hashes = [ + "h1:Pm35ZB6Sngf/gO0Hzweoda5UkOA/H9Z1MtKAHwLNAnI=", "h1:QJIz0k/WK2MKCXTNXrtnxrKJA0PH4gzWOY3NP/VKc8M=", "zh:1631fa3670507300d12cb256768c6270775e8cd7fc15159beef8c80ffad98d82", "zh:2063c3106b05fd25d851cf2577d0e9a7a9effd82823c13fb71d396d8dd12910c", diff --git a/terraform/vars/terraform-staging.tfvars b/terraform/vars/terraform-staging.tfvars index bf6ba36..b0d6613 100644 --- a/terraform/vars/terraform-staging.tfvars +++ b/terraform/vars/terraform-staging.tfvars @@ -13,6 +13,7 @@ db_logs_exports = ["audit", "profiler"] eks_version = "1.29" eks_node_release_version = "1.29.0-20240202" ebs_csi_addon_version = "v1.27.0-eksbuild.1" +kube_proxy_addon_version = "v1.29.15-eksbuild.28" gateway_node_group_desired_size = 0 hibernate = false @@ -33,3 +34,4 @@ mongodb_apps_node_group_instance_types = ["r5a.large", "r5.large"] aq_bucket_cors_allowed_origin = "*" deploy_sparkpost_templates = false +gha_role_arn = "arn:aws:iam::843801476059:role/wri-api-staging-githubactions-role" \ No newline at end of file