Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions .github/workflows/checkov.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Run Checkov static analysis on Terraform/IaC for every pull request
# targeting one of the long-lived environment branches.
name: "Checkov GitHub Action"
on:
  pull_request:
    branches: [test, dev, qa, prod, main]

jobs:
  checkov:
    name: checkov
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        # v2 runs on a deprecated Node runtime; v4 is the supported release.
        uses: actions/checkout@v4
        with:
          submodules: "recursive"
          # PAT needed so submodule checkout can access private repositories.
          token: ${{ secrets.ENGINEERING_GITHUB_PERSONAL_ACCESS_TOKEN }}
      - name: Run Checkov
        id: checkov
        # Pin a release tag instead of the mutable `master` branch so the
        # scan is reproducible and not exposed to upstream branch changes.
        uses: bridgecrewio/checkov-action@v12
        with:
          download_external_modules: true # optional: download external terraform modules from public git repositories and terraform registry
          quiet: true # optional: display only failed checks
13 changes: 12 additions & 1 deletion .github/workflows/terraform.yml
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,8 @@ jobs:
id: plan
run: terraform plan -no-color
continue-on-error: true
- uses: actions/github-script@v4
- name: PR Comment
uses: actions/github-script@v4
if: github.event_name == 'pull_request'
env:
PLAN: "terraform\n${{ steps.plan.outputs.stdout }}"
Expand All @@ -73,3 +74,13 @@ jobs:
repo: context.repo.repo,
body: output
})
- name: Release
  # BUG FIX: `github.base_ref` is only populated for pull_request events;
  # on a push it is empty, so the original condition
  # `github.base_ref == 'main'` could never be true and the release step
  # would never run. Detect a push to main via `github.ref` instead.
  if: github.event_name == 'push' && github.ref == 'refs/heads/main'
  uses: cycjimmy/semantic-release-action@v2
  env:
    GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  with:
    extra_plugins: |
      @semantic-release/[email protected]
      @semantic-release/[email protected]
      @semantic-release/[email protected]
9 changes: 0 additions & 9 deletions .github/workflows/terratest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,3 @@ jobs:
run: |
cd test
go test -v -timeout 30m
- name: release
uses: cycjimmy/semantic-release-action@v2
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
extra_plugins: |
@semantic-release/[email protected]
@semantic-release/[email protected]
@semantic-release/[email protected]
39 changes: 28 additions & 11 deletions terraform/iam.tf
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,11 @@ data "aws_iam_policy_document" "cluster_autoscaler_infrastructure_access_policy_
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
]
effect = "Allow"
effect = "Allow"

# AWS examples also use ["*"] for resources:
# https://docs.aws.amazon.com/autoscaling/plans/userguide/security_iam_id-based-policy-examples.html
# checkov:skip=CKV_AWS_111:Ensure IAM policies does not allow write access without constraint
resources = ["*"]
}
}
Expand Down Expand Up @@ -445,12 +449,8 @@ resource "aws_iam_role_policy_attachment" "external_secrets_monitoring_role_poli
# IAM User for Pipelines Bucket
# ######################################################################################################################

resource "aws_iam_user" "kubeflow_pipelines_user" {
name = "kubeflow-pipelines"

tags = {
Terraform = "true"
}
# IAM group collecting the users that need access to the Kubeflow pipelines
# bucket; permissions are granted to the group (via the policy attachment
# below) rather than to individual users.
resource "aws_iam_group" "kubeflow_pipelines_users_group" {
name = "kubeflow_pipelines_users_group"
}

data "aws_iam_policy_document" "s3_access_policy_document" {
Expand All @@ -472,13 +472,30 @@ data "aws_iam_policy_document" "s3_access_policy_document" {
}
}

resource "aws_iam_user_policy" "kubeflow_pipelines_user_policy" {
name = "kubeflow_pipelines_user_policy"
user = aws_iam_user.kubeflow_pipelines_user.name

# Standalone managed policy wrapping the S3 access policy document defined
# earlier in this file, so it can be attached to the pipelines users group.
resource "aws_iam_policy" "kubeflow_pipelines_s3_access_policy" {
name = "kubeflow_pipelines_s3_access_policy"
policy = data.aws_iam_policy_document.s3_access_policy_document.json
}

# Attach the S3 access policy to the group; every member of the group
# inherits these permissions.
resource "aws_iam_group_policy_attachment" "kubeflow_pipelines_users_group_policy_attachment" {
group = aws_iam_group.kubeflow_pipelines_users_group.name
policy_arn = aws_iam_policy.kubeflow_pipelines_s3_access_policy.arn
}

# Service user for the Kubeflow pipelines workload. It carries no inline
# policy; its permissions come solely from membership in the pipelines
# users group (see the group membership resource below).
resource "aws_iam_user" "kubeflow_pipelines_user" {
name = "kubeflow-pipelines"

tags = {
Terraform = "true"
}
}

# Place the kubeflow-pipelines user into the pipelines users group so it
# receives the group's S3 permissions.
resource "aws_iam_group_membership" "kubeflow_pipelines_user_group_membership" {
name = "kubeflow_pipelines_user_group_membership"
users = toset([aws_iam_user.kubeflow_pipelines_user.name])
group = aws_iam_group.kubeflow_pipelines_users_group.name
}

# Static access key for the kubeflow-pipelines user.
# NOTE(review): the secret access key will be stored in Terraform state in
# plaintext - confirm the state backend is encrypted and access-restricted,
# or consider generating the key with a PGP key / outside of Terraform.
resource "aws_iam_access_key" "kubeflow_pipelines_user_credentials" {
user = aws_iam_user.kubeflow_pipelines_user.name
}
51 changes: 46 additions & 5 deletions terraform/storage.tf
Original file line number Diff line number Diff line change
Expand Up @@ -10,11 +10,41 @@ resource "aws_db_subnet_group" "kubeflow_db_subnet_group" {
subnet_ids = data.aws_subnet.kubeflow_db_subnets.*.id
}

################################################################################
# Create an IAM role to allow enhanced monitoring
################################################################################

# Role that RDS assumes to publish Enhanced Monitoring metrics; the trust
# policy is the rds_enhanced_monitoring policy document defined below.
# name_prefix avoids collisions when the role is recreated.
resource "aws_iam_role" "rds_enhanced_monitoring" {
name_prefix = "rds-enhanced-monitoring-"
assume_role_policy = data.aws_iam_policy_document.rds_enhanced_monitoring.json
}

# Grant the monitoring role the AWS-managed permissions required for RDS
# Enhanced Monitoring.
resource "aws_iam_role_policy_attachment" "rds_enhanced_monitoring" {
role = aws_iam_role.rds_enhanced_monitoring.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole"
}

# Trust policy allowing the RDS monitoring service principal to assume the
# enhanced-monitoring role above.
data "aws_iam_policy_document" "rds_enhanced_monitoring" {
statement {
actions = [
"sts:AssumeRole",
]

effect = "Allow"

principals {
type = "Service"
identifiers = ["monitoring.rds.amazonaws.com"]
}
}
}

resource "aws_db_instance" "kubeflow_db" {

name = "kubeflow"
instance_class = "db.t3.micro"
multi_az = false
name = "kubeflow"
instance_class = "db.t3.micro"
multi_az = var.stage == "prod" ? true : false
deletion_protection = true

### Engine
### ---
Expand All @@ -26,8 +56,9 @@ resource "aws_db_instance" "kubeflow_db" {
### Auth
### ---
# TODO: randomize or secure this somehow
username = "kubeflow"
password = "kubeflow" #tfsec:ignore:GEN003
username = "kubeflow"
password = "kubeflow" #tfsec:ignore:GEN003
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Local exec provisioner to run an atomic password changing script via a null resource that depends on this kubeflow_db resource is I think the cleanest way.

iam_database_authentication_enabled = true

### Storage
### ---
Expand All @@ -43,6 +74,12 @@ resource "aws_db_instance" "kubeflow_db" {
# Not sure where to get this in real-life
vpc_security_group_ids = [var.aws_eks_cluster_primary_security_group_id]
db_subnet_group_name = aws_db_subnet_group.kubeflow_db_subnet_group.name

### Logging
### ---
enabled_cloudwatch_logs_exports = ["audit", "error", "general", "slowquery"]
monitoring_interval = 60
monitoring_role_arn = aws_iam_role.rds_enhanced_monitoring.arn
}

# ######################################################################################################################
Expand All @@ -67,6 +104,10 @@ resource "aws_elasticache_cluster" "kubeflow_oidc_cache" {

security_group_ids = [var.aws_eks_cluster_primary_security_group_id]
subnet_group_name = aws_elasticache_subnet_group.kubeflow_oidc_cache_subnet_group.name

### Backups
### ---
snapshot_retention_limit = 5
}

# ######################################################################################################################
Expand Down