diff --git a/.github/actions/build_ami/action.yaml b/.github/actions/build_ami/action.yaml
index 7390f9c45..a476894f4 100644
--- a/.github/actions/build_ami/action.yaml
+++ b/.github/actions/build_ami/action.yaml
@@ -59,7 +59,7 @@ runs:
uses: actions/checkout@v4
- name: Get EIF for Release ${{ inputs.operator_release }}
- uses: IABTechLab/uid2-operator/.github/actions/download_release_artifact@main
+ uses: ./.github/actions/download_release_artifact
if: ${{ inputs.operator_release != '' }}
with:
github_token: ${{ inputs.github_token }}
diff --git a/.github/actions/build_aws_eif/action.yaml b/.github/actions/build_aws_eif/action.yaml
index f17523a44..08e6d6604 100644
--- a/.github/actions/build_aws_eif/action.yaml
+++ b/.github/actions/build_aws_eif/action.yaml
@@ -96,8 +96,9 @@ runs:
cp ${{ steps.buildFolder.outputs.BUILD_FOLDER }}/identity_scope.txt ${ARTIFACTS_OUTPUT_DIR}/
cp ${{ steps.buildFolder.outputs.BUILD_FOLDER }}/version_number.txt ${ARTIFACTS_OUTPUT_DIR}/
- cp ./scripts/aws/start.sh ${ARTIFACTS_OUTPUT_DIR}/
- cp ./scripts/aws/stop.sh ${ARTIFACTS_OUTPUT_DIR}/
+ cp ./scripts/aws/ec2.py ${ARTIFACTS_OUTPUT_DIR}/
+ cp ./scripts/confidential_compute.py ${ARTIFACTS_OUTPUT_DIR}/
+ cp ./scripts/aws/requirements.txt ${ARTIFACTS_OUTPUT_DIR}/
cp ./scripts/aws/proxies.host.yaml ${ARTIFACTS_OUTPUT_DIR}/
cp ./scripts/aws/sockd.conf ${ARTIFACTS_OUTPUT_DIR}/
cp ./scripts/aws/uid2operator.service ${ARTIFACTS_OUTPUT_DIR}/
diff --git a/.github/actions/build_eks_docker_image/action.yaml b/.github/actions/build_eks_docker_image/action.yaml
index 1a7bca316..922136c5d 100644
--- a/.github/actions/build_eks_docker_image/action.yaml
+++ b/.github/actions/build_eks_docker_image/action.yaml
@@ -47,7 +47,7 @@ runs:
mkdir ${{ inputs.artifacts_output_dir }} -p
- name: Get EIF for Release ${{ inputs.operator_release }}
- uses: IABTechLab/uid2-operator/.github/actions/download_release_artifact@main
+ uses: ./.github/actions/download_release_artifact
if: ${{ inputs.operator_release != '' }}
with:
github_token: ${{ inputs.github_token }}
diff --git a/.github/actions/install_az_cli/action.yaml b/.github/actions/install_az_cli/action.yaml
new file mode 100644
index 000000000..19bdb382c
--- /dev/null
+++ b/.github/actions/install_az_cli/action.yaml
@@ -0,0 +1,36 @@
+name: 'Install Azure CLI'
+description: 'Install Azure CLI'
+runs:
+ using: 'composite'
+ steps:
+ - name: uninstall azure-cli
+ shell: bash
+ run: |
+ sudo apt-get remove -y azure-cli
+
+ - name: install azure-cli 2.61.0
+ shell: bash
+ run: |
+ sudo apt-get update
+ sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
+ sudo mkdir -p /etc/apt/keyrings
+ curl -sLS https://packages.microsoft.com/keys/microsoft.asc |
+ gpg --dearmor | sudo tee /etc/apt/keyrings/microsoft.gpg > /dev/null
+ sudo chmod go+r /etc/apt/keyrings/microsoft.gpg
+ AZ_DIST=$(lsb_release -cs)
+ echo "Types: deb
+ URIs: https://packages.microsoft.com/repos/azure-cli/
+ Suites: ${AZ_DIST}
+ Components: main
+ Architectures: $(dpkg --print-architecture)
+ Signed-by: /etc/apt/keyrings/microsoft.gpg" | sudo tee /etc/apt/sources.list.d/azure-cli.sources
+ sudo apt-get update
+ sudo apt-get install azure-cli
+
+ apt-cache policy azure-cli
+ # Obtain the currently installed distribution
+ AZ_DIST=$(lsb_release -cs)
+ # Store an Azure CLI version of choice
+ AZ_VER=2.61.0
+ # Install a specific version
+ sudo apt-get install azure-cli=${AZ_VER}-1~${AZ_DIST} --allow-downgrades
diff --git a/.github/actions/update_operator_version/action.yaml b/.github/actions/update_operator_version/action.yaml
index 1c66838e8..91cd54ff0 100644
--- a/.github/actions/update_operator_version/action.yaml
+++ b/.github/actions/update_operator_version/action.yaml
@@ -43,7 +43,7 @@ runs:
uses: trstringer/manual-approval@v1
with:
secret: ${{ github.token }}
- approvers: thomasm-ttd,atarassov-ttd,cody-constine-ttd
+ approvers: atarassov-ttd,vishalegbert-ttd,sunnywu,cody-constine-ttd
minimum-approvals: 1
issue-title: Creating Major version of UID2-Operator
diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml
index 00695f1db..aa13387c6 100644
--- a/.github/workflows/build-and-test.yaml
+++ b/.github/workflows/build-and-test.yaml
@@ -3,7 +3,7 @@ on: [pull_request, push, workflow_dispatch]
jobs:
build:
- uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-build-and-test.yaml@v2
+ uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-build-and-test.yaml@v3
with:
java_version: 21
secrets: inherit
\ No newline at end of file
diff --git a/.github/workflows/build-uid2-ami.yaml b/.github/workflows/build-uid2-ami.yaml
index a6c3143da..8439b33c6 100644
--- a/.github/workflows/build-uid2-ami.yaml
+++ b/.github/workflows/build-uid2-ami.yaml
@@ -42,7 +42,7 @@ jobs:
- name: Build UID2 Operator AMI
id: buildAMI
- uses: IABTechLab/uid2-operator/.github/actions/build_ami@main
+ uses: ./.github/actions/build_ami
with:
identity_scope: uid2
eif_repo_owner: ${{ env.REPO_OWNER }}
@@ -92,7 +92,7 @@ jobs:
- name: Build EUID Operator AMI
id: buildAMI
- uses: IABTechLab/uid2-operator/.github/actions/build_ami@main
+ uses: ./.github/actions/build_ami
with:
identity_scope: euid
eif_repo_owner: ${{ env.REPO_OWNER }}
diff --git a/.github/workflows/publish-all-operators.yaml b/.github/workflows/publish-all-operators.yaml
index c5db3a3b0..6b0198060 100644
--- a/.github/workflows/publish-all-operators.yaml
+++ b/.github/workflows/publish-all-operators.yaml
@@ -1,5 +1,5 @@
name: Publish All Operators
-run-name: ${{ format('Publish All Operators - {0} Release', inputs.release_type) }}
+run-name: ${{ format('Publish All Operators - {0} Release', github.event.inputs.release_type || 'scheduled') }}
on:
workflow_dispatch:
inputs:
@@ -18,6 +18,8 @@ on:
- CRITICAL,HIGH
- CRITICAL,HIGH,MEDIUM
- CRITICAL (DO NOT use if JIRA ticket not raised)
+ schedule:
+ - cron: "0 0 * * *"
jobs:
start:
@@ -26,13 +28,25 @@ jobs:
outputs:
new_version: ${{ steps.version.outputs.new_version }}
commit_sha: ${{ steps.commit-and-tag.outputs.commit_sha }}
+ release_type: ${{ steps.set-env.outputs.release_type }}
+ vulnerability_severity: ${{ steps.set-env.outputs.vulnerability_severity }}
+ env:
+ RELEASE_TYPE: ${{ inputs.release_type || (github.event_name == 'schedule' && 'patch') }}
+ VULNERABILITY_SEVERITY: ${{ inputs.vulnerability_severity || (github.event_name == 'schedule' && 'CRITICAL,HIGH') }}
steps:
+ - name: Set Environment Variables
+ id: set-env
+ run: |
+ echo "release_type=${{ inputs.release_type || (github.event_name == 'schedule' && 'patch') }}" >> $GITHUB_ENV
+ echo "vulnerability_severity=${{ inputs.vulnerability_severity || (github.event_name == 'schedule' && 'CRITICAL,HIGH') }}" >> $GITHUB_ENV
+ echo "release_type=${RELEASE_TYPE}" >> $GITHUB_OUTPUT
+ echo "vulnerability_severity=${VULNERABILITY_SEVERITY}" >> $GITHUB_OUTPUT
- name: Approve Major release
- if: inputs.release_type == 'Major'
+ if: env.RELEASE_TYPE == 'Major'
uses: trstringer/manual-approval@v1
with:
secret: ${{ github.token }}
- approvers: thomasm-ttd,atarassov-ttd,cody-constine-ttd
+ approvers: atarassov-ttd,vishalegbert-ttd,sunnywu,cody-constine-ttd
minimum-approvals: 1
issue-title: Creating Major version of UID2-Operator
@@ -55,7 +69,7 @@ jobs:
fetch-depth: 0
- name: Scan vulnerabilities
- uses: IABTechLab/uid2-shared-actions/actions/vulnerability_scan_filesystem@v2
+ uses: IABTechLab/uid2-shared-actions/actions/vulnerability_scan_filesystem@v3
with:
scan_severity: HIGH,CRITICAL
failure_severity: CRITICAL
@@ -64,7 +78,7 @@ jobs:
id: version
uses: IABTechLab/uid2-shared-actions/actions/version_number@v2
with:
- type: ${{ inputs.release_type }}
+ type: ${{ env.RELEASE_TYPE }}
branch_name: ${{ github.ref }}
- name: Update pom.xml
@@ -79,7 +93,7 @@ jobs:
uses: IABTechLab/uid2-shared-actions/actions/commit_pr_and_merge@v3
with:
add: 'pom.xml version.json'
- message: 'Released ${{ inputs.release_type }} version: ${{ steps.version.outputs.new_version }}'
+ message: 'Released ${{ env.RELEASE_TYPE }} version: ${{ steps.version.outputs.new_version }}'
tag: v${{ steps.version.outputs.new_version }}
buildPublic:
@@ -87,9 +101,9 @@ jobs:
needs: start
uses: ./.github/workflows/publish-public-operator-docker-image.yaml
with:
- release_type: ${{ inputs.release_type }}
+ release_type: ${{ needs.start.outputs.release_type }}
version_number_input: ${{ needs.start.outputs.new_version }}
- vulnerability_severity: ${{ inputs.vulnerability_severity }}
+ vulnerability_severity: ${{ needs.start.outputs.vulnerability_severity }}
secrets: inherit
buildGCP:
@@ -97,10 +111,10 @@ jobs:
needs: start
uses: ./.github/workflows/publish-gcp-oidc-enclave-docker.yaml
with:
- release_type: ${{ inputs.release_type }}
+ release_type: ${{ needs.start.outputs.release_type }}
version_number_input: ${{ needs.start.outputs.new_version }}
commit_sha: ${{ needs.start.outputs.commit_sha }}
- vulnerability_severity: ${{ inputs.vulnerability_severity }}
+ vulnerability_severity: ${{ needs.start.outputs.vulnerability_severity }}
secrets: inherit
buildAzure:
@@ -108,10 +122,10 @@ jobs:
needs: start
uses: ./.github/workflows/publish-azure-cc-enclave-docker.yaml
with:
- release_type: ${{ inputs.release_type }}
+ release_type: ${{ needs.start.outputs.release_type }}
version_number_input: ${{ needs.start.outputs.new_version }}
commit_sha: ${{ needs.start.outputs.commit_sha }}
- vulnerability_severity: ${{ inputs.vulnerability_severity }}
+ vulnerability_severity: ${{ needs.start.outputs.vulnerability_severity }}
secrets: inherit
buildAWS:
@@ -119,7 +133,7 @@ jobs:
needs: start
uses: ./.github/workflows/publish-aws-nitro-eif.yaml
with:
- release_type: ${{ inputs.release_type }}
+ release_type: ${{ needs.start.outputs.release_type }}
version_number_input: ${{ needs.start.outputs.new_version }}
commit_sha: ${{ needs.start.outputs.commit_sha }}
secrets: inherit
@@ -132,18 +146,11 @@ jobs:
operator_run_number: ${{ github.run_id }}
secrets: inherit
- buildEKS:
- name: Build AWS EKS Docker
- needs: [start, buildAWS]
- uses: ./.github/workflows/publish-aws-eks-nitro-enclave-docker.yaml
- with:
- operator_run_number: ${{ github.run_id }}
- secrets: inherit
-
createRelease:
name: Create Release
runs-on: ubuntu-latest
- needs: [start, buildPublic, buildGCP, buildAzure, buildAWS, buildAMI, buildEKS]
+ if: github.event_name == 'workflow_dispatch'
+ needs: [start, buildPublic, buildGCP, buildAzure, buildAWS, buildAMI]
steps:
- name: Checkout repo
uses: actions/checkout@v4
@@ -162,12 +169,18 @@ jobs:
pattern: gcp-oidc-enclave-ids-*
path: ./manifests/gcp_oidc_operator
- - name: Download Azure manifest
+ - name: Download Azure CC manifest
uses: actions/download-artifact@v4
with:
pattern: azure-cc-enclave-id-*
path: ./manifests/azure_cc_operator
+ - name: Download Azure AKS manifest
+ uses: actions/download-artifact@v4
+ with:
+ pattern: azure-aks-enclave-id-*
+ path: ./manifests/azure_aks_operator
+
- name: Download EIF manifest
uses: actions/download-artifact@v4
with:
@@ -180,12 +193,6 @@ jobs:
pattern: 'aws-ami-ids-*'
path: ./manifests/aws_ami
- - name: Download AWS EKS manifest
- uses: actions/download-artifact@v4
- with:
- pattern: 'aws-eks-enclave-ids-*'
- path: ./manifests/aws_eks
-
- name: Download Deployment Files
uses: actions/download-artifact@v4
with:
@@ -216,6 +223,7 @@ jobs:
(cd ./deployment/aws-euid-deployment-files-${{ needs.start.outputs.new_version }} && zip -r ../../aws-euid-deployment-files-${{ needs.start.outputs.new_version }}.zip . )
(cd ./deployment/aws-uid2-deployment-files-${{ needs.start.outputs.new_version }} && zip -r ../../aws-uid2-deployment-files-${{ needs.start.outputs.new_version }}.zip . )
(cd ./deployment/azure-cc-deployment-files-${{ needs.start.outputs.new_version }} && zip -r ../../azure-cc-deployment-files-${{ needs.start.outputs.new_version }}.zip . )
+ (cd ./deployment/azure-aks-deployment-files-${{ needs.start.outputs.new_version }} && zip -r ../../azure-aks-deployment-files-${{ needs.start.outputs.new_version }}.zip . )
(cd ./deployment/gcp-oidc-deployment-files-${{ needs.start.outputs.new_version }} && zip -r ../../gcp-oidc-deployment-files-${{ needs.start.outputs.new_version }}.zip . )
(cd manifests && zip -r ../uid2-operator-release-manifests-${{ needs.start.outputs.new_version }}.zip .)
@@ -229,5 +237,19 @@ jobs:
./aws-euid-deployment-files-${{ needs.start.outputs.new_version }}.zip
./aws-uid2-deployment-files-${{ needs.start.outputs.new_version }}.zip
./azure-cc-deployment-files-${{ needs.start.outputs.new_version }}.zip
+ ./azure-aks-deployment-files-${{ needs.start.outputs.new_version }}.zip
./gcp-oidc-deployment-files-${{ needs.start.outputs.new_version }}.zip
./uid2-operator-release-manifests-${{ needs.start.outputs.new_version }}.zip
+ notifyFailure:
+ name: Notify Slack on Failure
+ runs-on: ubuntu-latest
+ if: failure() && github.ref == 'refs/heads/main'
+ needs: [start, buildPublic, buildGCP, buildAzure, buildAWS, buildAMI]
+ steps:
+ - name: Send Slack Alert
+ env:
+ SLACK_COLOR: danger
+ SLACK_MESSAGE: ':x: Operator Pipeline failed'
+ SLACK_TITLE: Pipeline Failed in ${{ github.workflow }}
+ SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+ uses: rtCamp/action-slack-notify@v2
diff --git a/.github/workflows/publish-aws-eks-nitro-enclave-docker.yaml b/.github/workflows/publish-aws-eks-nitro-enclave-docker.yaml
index eb602b422..0de600aac 100644
--- a/.github/workflows/publish-aws-eks-nitro-enclave-docker.yaml
+++ b/.github/workflows/publish-aws-eks-nitro-enclave-docker.yaml
@@ -1,4 +1,4 @@
-name: Publish EKS Operator Docker Images
+name: Publish EKS Enclave Operator Docker Images
run-name: >-
${{ inputs.operator_release == '' && format('Publish EKS Operator Docker Images for Operator Run Number: {0}', inputs.operator_run_number) || format('Publish EKS Operator Docker Images for Operator Release: {0}', inputs.operator_release)}}
on:
@@ -36,9 +36,12 @@ jobs:
security-events: write
packages: write
steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Build Docker Image for EKS Pod
id: build_docker_image_uid
- uses: IABTechLab/uid2-operator/.github/actions/build_eks_docker_image@main
+ uses: ./.github/actions/build_eks_docker_image
with:
identity_scope: uid2
artifacts_output_dir: ${{ env.ARTIFACTS_BASE_OUTPUT_DIR }}/uid2
@@ -61,9 +64,12 @@ jobs:
security-events: write
packages: write
steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Build Docker Image for EKS Pod
id: build_docker_image_euid
- uses: IABTechLab/uid2-operator/.github/actions/build_eks_docker_image@main
+ uses: ./.github/actions/build_eks_docker_image
with:
identity_scope: euid
artifacts_output_dir: ${{ env.ARTIFACTS_BASE_OUTPUT_DIR }}/euid
diff --git a/.github/workflows/publish-aws-nitro-eif.yaml b/.github/workflows/publish-aws-nitro-eif.yaml
index 8783f6829..3c599c663 100644
--- a/.github/workflows/publish-aws-nitro-eif.yaml
+++ b/.github/workflows/publish-aws-nitro-eif.yaml
@@ -48,9 +48,12 @@ jobs:
env:
GITHUB_CONTEXT: ${{ toJson(github) }}
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Update Operator Version
id: update_version
- uses: IABTechLab/uid2-operator/.github/actions/update_operator_version@main
+ uses: ./.github/actions/update_operator_version
with:
release_type: ${{ inputs.release_type }}
version_number_input: ${{ inputs.version_number_input }}
@@ -68,9 +71,12 @@ jobs:
runs-on: ubuntu-latest
needs: start
steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Build UID2 AWS EIF
id: build_uid2_eif
- uses: IABTechLab/uid2-operator/.github/actions/build_aws_eif@main
+ uses: ./.github/actions/build_aws_eif
with:
identity_scope: uid2
artifacts_base_output_dir: ${{ env.ARTIFACTS_BASE_OUTPUT_DIR }}/uid2
@@ -104,9 +110,12 @@ jobs:
runs-on: ubuntu-latest
needs: start
steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Build EUID AWS EIF
id: build_euid_eif
- uses: IABTechLab/uid2-operator/.github/actions/build_aws_eif@main
+ uses: ./.github/actions/build_aws_eif
with:
identity_scope: euid
artifacts_base_output_dir: ${{ env.ARTIFACTS_BASE_OUTPUT_DIR }}/euid
diff --git a/.github/workflows/publish-azure-cc-enclave-docker.yaml b/.github/workflows/publish-azure-cc-enclave-docker.yaml
index 0127a71f4..15064f94a 100644
--- a/.github/workflows/publish-azure-cc-enclave-docker.yaml
+++ b/.github/workflows/publish-azure-cc-enclave-docker.yaml
@@ -69,10 +69,16 @@ jobs:
outputs:
jar_version: ${{ steps.update_version.outputs.new_version }}
image_tag: ${{ steps.update_version.outputs.image_tag }}
+ is_release: ${{ steps.update_version.outputs.is_release }}
+ docker_version: ${{ steps.meta.outputs.version }}
+ tags: ${{ steps.meta.outputs.tags }}
steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Update Operator Version
id: update_version
- uses: IABTechLab/uid2-operator/.github/actions/update_operator_version@main
+ uses: ./.github/actions/update_operator_version
with:
release_type: ${{ inputs.release_type }}
version_number_input: ${{ inputs.version_number_input }}
@@ -92,6 +98,7 @@ jobs:
echo "jar_version=$(mvn help:evaluate -Dexpression=project.version | grep -e '^[1-9][^\[]')" >> $GITHUB_OUTPUT
echo "git_commit=$(git show --format="%h" --no-patch)" >> $GITHUB_OUTPUT
cp -r target ${{ env.DOCKER_CONTEXT_PATH }}/
+ cp scripts/confidential_compute.py ${{ env.DOCKER_CONTEXT_PATH }}/
- name: Log in to the Docker container registry
uses: docker/login-action@v3
@@ -158,35 +165,17 @@ jobs:
JAR_VERSION=${{ steps.update_version.outputs.new_version }}
IMAGE_VERSION=${{ steps.update_version.outputs.new_version }}
- - name: uninstall azure-cli
- run: |
- sudo apt-get remove -y azure-cli
-
- - name: install azure-cli 2.61.0
- run: |
- sudo apt-get update
- sudo apt-get install apt-transport-https ca-certificates curl gnupg lsb-release
- sudo mkdir -p /etc/apt/keyrings
- curl -sLS https://packages.microsoft.com/keys/microsoft.asc |
- gpg --dearmor | sudo tee /etc/apt/keyrings/microsoft.gpg > /dev/null
- sudo chmod go+r /etc/apt/keyrings/microsoft.gpg
- AZ_DIST=$(lsb_release -cs)
- echo "Types: deb
- URIs: https://packages.microsoft.com/repos/azure-cli/
- Suites: ${AZ_DIST}
- Components: main
- Architectures: $(dpkg --print-architecture)
- Signed-by: /etc/apt/keyrings/microsoft.gpg" | sudo tee /etc/apt/sources.list.d/azure-cli.sources
- sudo apt-get update
- sudo apt-get install azure-cli
-
- apt-cache policy azure-cli
- # Obtain the currently installed distribution
- AZ_DIST=$(lsb_release -cs)
- # Store an Azure CLI version of choice
- AZ_VER=2.61.0
- # Install a specific version
- sudo apt-get install azure-cli=${AZ_VER}-1~${AZ_DIST} --allow-downgrades
+ azureCc:
+ name: Create Azure CC artifacts
+ runs-on: ubuntu-latest
+ permissions: {}
+ needs: buildImage
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install Azure CLI
+ uses: ./.github/actions/install_az_cli
- name: check azure-cli version
run: |
@@ -194,61 +183,80 @@ jobs:
- name: Generate Azure deployment artifacts
env:
- IMAGE: ${{ steps.meta.outputs.tags }}
+ IMAGE: ${{ needs.buildImage.outputs.tags }}
OUTPUT_DIR: ${{ env.ARTIFACTS_OUTPUT_DIR }}
MANIFEST_DIR: ${{ env.MANIFEST_OUTPUT_DIR }}
- VERSION_NUMBER: ${{ steps.update_version.outputs.new_version }}
+ VERSION_NUMBER: ${{ needs.buildImage.outputs.jar_version }}
run: |
bash ./scripts/azure-cc/deployment/generate-deployment-artifacts.sh
- name: Upload deployment artifacts
uses: actions/upload-artifact@v4
with:
- name: azure-cc-deployment-files-${{ steps.update_version.outputs.new_version }}
+ name: azure-cc-deployment-files-${{ needs.buildImage.outputs.jar_version }}
path: ${{ env.ARTIFACTS_OUTPUT_DIR }}
if-no-files-found: error
- name: Upload manifest
uses: actions/upload-artifact@v4
with:
- name: azure-cc-enclave-id-${{ steps.update_version.outputs.new_version }}
+ name: azure-cc-enclave-id-${{ needs.buildImage.outputs.jar_version }}
path: ${{ env.MANIFEST_OUTPUT_DIR }}
if-no-files-found: error
- - name: Generate release archive
- if: ${{ inputs.version_number_input == '' && steps.update_version.outputs.is_release == 'true' }}
+ e2eAzureCc:
+ name: E2E Azure CC
+ uses: ./.github/workflows/run-e2e-tests-on-operator.yaml
+ needs: [buildImage, azureCc]
+ with:
+ operator_type: azure
+ operator_image_version: ${{ needs.buildImage.outputs.image_tag }}
+ secrets: inherit
+
+ azureAks:
+ name: Create Azure AKS artifacts
+ runs-on: ubuntu-latest
+ permissions: {}
+ needs: buildImage
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install Azure CLI
+ uses: ./.github/actions/install_az_cli
+
+ - name: check azure-cli version
+ run: |
+ az --version
+
+ - name: Generate Azure deployment artifacts
+ env:
+ IMAGE: ${{ needs.buildImage.outputs.tags }}
+ OUTPUT_DIR: ${{ env.ARTIFACTS_OUTPUT_DIR }}
+ MANIFEST_DIR: ${{ env.MANIFEST_OUTPUT_DIR }}
+ VERSION_NUMBER: ${{ needs.buildImage.outputs.jar_version }}
run: |
- zip -j ${{ env.ARTIFACTS_OUTPUT_DIR }}/uid2-operator-deployment-artifacts-${{ steps.meta.outputs.version }}.zip ${{ env.ARTIFACTS_OUTPUT_DIR }}/*
+ bash ./scripts/azure-aks/deployment/generate-deployment-artifacts.sh
- - name: Build changelog
- id: github_release
- if: ${{ inputs.version_number_input == '' && steps.update_version.outputs.is_release == 'true' }}
- uses: mikepenz/release-changelog-builder-action@v4
+ - name: Upload deployment artifacts
+ uses: actions/upload-artifact@v4
with:
- configurationJson: |
- {
- "template": "#{{CHANGELOG}}\n## Installation\n```\ndocker pull ${{ steps.meta.outputs.tags }}\n```\n\n## Image reference to deploy: \n```\n${{ steps.update_version.outputs.image_tag }}\n```\n\n## Changelog\n#{{UNCATEGORIZED}}",
- "pr_template": " - #{{TITLE}} - ( PR: ##{{NUMBER}} )"
- }
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ name: azure-aks-deployment-files-${{ needs.buildImage.outputs.jar_version }}
+ path: ${{ env.ARTIFACTS_OUTPUT_DIR }}
+ if-no-files-found: error
- - name: Create release
- if: ${{ inputs.version_number_input == '' && steps.update_version.outputs.is_release == 'true' }}
- uses: softprops/action-gh-release@v2
+ - name: Upload manifest
+ uses: actions/upload-artifact@v4
with:
- name: ${{ steps.update_version.outputs.new_version }}
- body: ${{ steps.github_release.outputs.changelog }}
- draft: true
- files: |
- ${{ env.ARTIFACTS_OUTPUT_DIR }}/uid2-operator-deployment-artifacts-${{ steps.update_version.outputs.new_version }}.zip
- ${{ env.MANIFEST_OUTPUT_DIR }}/azure-cc-operator-digest-${{ steps.update_version.outputs.new_version }}.txt
-
- e2e:
- name: E2E
+ name: azure-aks-enclave-id-${{ needs.buildImage.outputs.jar_version }}
+ path: ${{ env.MANIFEST_OUTPUT_DIR }}
+ if-no-files-found: error
+
+ e2eAzureAks:
+ name: E2E Azure AKS
uses: ./.github/workflows/run-e2e-tests-on-operator.yaml
- needs: buildImage
+ needs: [buildImage, azureAks]
with:
- operator_type: azure
+ operator_type: aks
operator_image_version: ${{ needs.buildImage.outputs.image_tag }}
secrets: inherit
diff --git a/.github/workflows/publish-gcp-oidc-enclave-docker.yaml b/.github/workflows/publish-gcp-oidc-enclave-docker.yaml
index 9f042a916..02977f83d 100644
--- a/.github/workflows/publish-gcp-oidc-enclave-docker.yaml
+++ b/.github/workflows/publish-gcp-oidc-enclave-docker.yaml
@@ -71,9 +71,12 @@ jobs:
jar_version: ${{ steps.update_version.outputs.new_version }}
image_tag: ${{ steps.update_version.outputs.image_tag }}
steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
- name: Update Operator Version
id: update_version
- uses: IABTechLab/uid2-operator/.github/actions/update_operator_version@main
+ uses: ./.github/actions/update_operator_version
with:
release_type: ${{ inputs.release_type }}
version_number_input: ${{ inputs.version_number_input }}
@@ -93,6 +96,7 @@ jobs:
echo "jar_version=$(mvn help:evaluate -Dexpression=project.version | grep -e '^[1-9][^\[]')" >> $GITHUB_OUTPUT
echo "git_commit=$(git show --format="%h" --no-patch)" >> $GITHUB_OUTPUT
cp -r target ${{ env.DOCKER_CONTEXT_PATH }}/
+ cp scripts/confidential_compute.py ${{ env.DOCKER_CONTEXT_PATH }}/
- name: Log in to the Docker container registry
uses: docker/login-action@v3
@@ -155,31 +159,13 @@ jobs:
IMAGE_VERSION=${{ steps.update_version.outputs.new_version }}
BUILD_TARGET=${{ env.ENCLAVE_PROTOCOL }}
- - name: Generate Trivy vulnerability scan report
- uses: aquasecurity/trivy-action@0.14.0
- with:
- image-ref: ${{ steps.meta.outputs.tags }}
- format: 'sarif'
- exit-code: '0'
- ignore-unfixed: true
- severity: 'CRITICAL,HIGH'
- output: 'trivy-results.sarif'
- hide-progress: true
-
- - name: Upload Trivy scan report to GitHub Security tab
- uses: github/codeql-action/upload-sarif@v3
- with:
- sarif_file: 'trivy-results.sarif'
-
- - name: Test with Trivy vulnerability scanner
- uses: aquasecurity/trivy-action@0.14.0
+ - name: Vulnerability Scan
+ uses: IABTechLab/uid2-shared-actions/actions/vulnerability_scan@v3
with:
- image-ref: ${{ steps.meta.outputs.tags }}
- format: 'table'
- exit-code: '1'
- ignore-unfixed: true
- severity: ${{ inputs.vulnerability_severity }}
- hide-progress: true
+ image_ref: ${{ steps.meta.outputs.tags }}
+ scan_type: 'image'
+ skip_files: '/venv/lib/python3.12/site-packages/google/auth/crypt/__pycache__/_python_rsa.cpython-312.pyc' # Skip scanning this file as per UID2-4968
+ failure_severity: ${{ (inputs.vulnerability_severity == 'CRITICAL (DO NOT use if JIRA ticket not raised)' && 'CRITICAL') || inputs.vulnerability_severity }}
- name: Push to Docker
id: push-to-docker
diff --git a/.github/workflows/publish-public-operator-docker-image.yaml b/.github/workflows/publish-public-operator-docker-image.yaml
index d55806c6b..db3c527c8 100644
--- a/.github/workflows/publish-public-operator-docker-image.yaml
+++ b/.github/workflows/publish-public-operator-docker-image.yaml
@@ -53,7 +53,7 @@ jobs:
uses: trstringer/manual-approval@v1
with:
secret: ${{ github.token }}
- approvers: thomasm-ttd,atarassov-ttd,cody-constine-ttd
+ approvers: atarassov-ttd,vishalegbert-ttd,sunnywu,cody-constine-ttd
minimum-approvals: 1
issue-title: Creating Major version of UID2-Operator
diff --git a/.github/workflows/run-e2e-tests-on-operator.yaml b/.github/workflows/run-e2e-tests-on-operator.yaml
index e57756c1b..462a992e1 100644
--- a/.github/workflows/run-e2e-tests-on-operator.yaml
+++ b/.github/workflows/run-e2e-tests-on-operator.yaml
@@ -1,10 +1,10 @@
name: Run Operator E2E Tests
-run-name: ${{ format('Run Operator E2E Tests - {0} {1}', inputs.operator_type, inputs.identity_scope) }} by @${{ github.actor }}
+run-name: ${{ format('Run Operator E2E Tests - {0} {1} {2}', inputs.operator_type, inputs.identity_scope, inputs.target_environment) }} by @${{ github.actor }}
on:
workflow_dispatch:
inputs:
operator_type:
- description: The operator type [public, gcp, azure, aws, eks]
+ description: The operator type [public, gcp, azure, aws, aks]
required: true
type: choice
options:
@@ -12,7 +12,7 @@ on:
- gcp
- azure
- aws
- - eks
+ - aks
identity_scope:
description: The identity scope [UID2, EUID]
required: true
@@ -20,6 +20,19 @@ on:
options:
- UID2
- EUID
+ target_environment:
+ description: PRIVATE OPERATORS ONLY - The target environment [mock, integ, prod]
+ required: true
+ type: choice
+ options:
+ - mock
+ - integ
+ - prod
+ delay_operator_shutdown:
+ description: PRIVATE OPERATORS ONLY - If true, will delay operator shutdown by 24 hours.
+ required: true
+ type: boolean
+ default: false
operator_image_version:
description: 'Image: Operator image version (for gcp/azure, set appropriate image)'
type: string
@@ -51,22 +64,25 @@ on:
"region": "us-east-1",
"ami": "ami-xxxxx",
"pcr0": "xxxxx" }'
- eks:
- description: The arguments for EKS operator
- type: string
- default: '{
- "pcr0": "xxxxx" }'
workflow_call:
inputs:
operator_type:
- description: The operator type [public, gcp, azure, aws, eks]
+ description: The operator type [public, gcp, azure, aws, aks]
type: string
default: public
identity_scope:
description: The identity scope [UID2, EUID]
type: string
default: UID2
+ target_environment:
+ description: PRIVATE OPERATORS ONLY - The target environment [mock, integ, prod]
+ type: string
+ default: mock
+ delay_operator_shutdown:
+ description: PRIVATE OPERATORS ONLY - If true, will delay operator shutdown by 24 hours.
+ type: boolean
+ default: false
operator_image_version:
description: 'Image: Operator image version (for gcp/azure, set appropriate image)'
type: string
@@ -97,11 +113,6 @@ on:
"region": "us-east-1",
"ami": "ami-xxxxx",
"pcr0": "xxxxx" }'
- eks:
- description: The arguments for EKS operator
- type: string
- default: '{
- "pcr0": "xxxxx" }'
jobs:
e2e-test:
@@ -109,22 +120,21 @@ jobs:
uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-run-e2e-tests.yaml@v3
with:
operator_type: ${{ inputs.operator_type }}
+ identity_scope: ${{ inputs.identity_scope }}
+ target_environment: ${{ inputs.target_environment }}
+ delay_operator_shutdown: ${{ inputs.delay_operator_shutdown }}
operator_image_version: ${{ inputs.operator_image_version }}
core_image_version: ${{ inputs.core_image_version }}
optout_image_version: ${{ inputs.optout_image_version }}
e2e_image_version: ${{ inputs.e2e_image_version }}
operator_branch: ${{ github.ref }}
- branch_core: ${{ fromJson(inputs.branch).core }}
- branch_optout: ${{ fromJson(inputs.branch).optout }}
- branch_admin: ${{ fromJson(inputs.branch).admin }}
- uid2_e2e_identity_scope: ${{ inputs.identity_scope }}
+ core_branch: ${{ fromJson(inputs.branch).core }}
+ optout_branch: ${{ fromJson(inputs.branch).optout }}
+ admin_branch: ${{ fromJson(inputs.branch).admin }}
gcp_workload_identity_provider_id: ${{ vars.GCP_WORKLOAD_IDENTITY_PROVIDER_ID }}
gcp_service_account: ${{ vars.GCP_SERVICE_ACCOUNT }}
gcp_project: ${{ vars.GCP_PROJECT }}
aws_region: ${{ fromJson(inputs.aws).region }}
aws_ami: ${{ fromJson(inputs.aws).ami }}
aws_pcr0: ${{ fromJson(inputs.aws).pcr0 }}
- eks_pcr0: ${{ fromJson(inputs.eks).pcr0 }}
- eks_test_cluster: ${{ vars.EKS_TEST_CLUSTER }}
- eks_test_cluster_region: ${{ vars.EKS_TEST_CLUSTER_REGION }}
secrets: inherit
diff --git a/.github/workflows/validate-image.yaml b/.github/workflows/validate-image.yaml
index 524f19102..37b4bf912 100644
--- a/.github/workflows/validate-image.yaml
+++ b/.github/workflows/validate-image.yaml
@@ -19,7 +19,7 @@ on:
jobs:
build-publish-docker-default:
- uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v2
+ uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v3
with:
failure_severity: ${{ inputs.failure_severity || 'CRITICAL,HIGH' }}
fail_on_error: ${{ inputs.fail_on_error || true }}
@@ -27,7 +27,7 @@ jobs:
java_version: 21
secrets: inherit
build-publish-docker-aws:
- uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v2
+ uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v3
with:
failure_severity: ${{ inputs.failure_severity || 'CRITICAL,HIGH' }}
fail_on_error: ${{ inputs.fail_on_error || true }}
@@ -36,7 +36,7 @@ jobs:
secrets: inherit
needs: [build-publish-docker-default]
build-publish-docker-gcp:
- uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v2
+ uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v3
with:
failure_severity: ${{ inputs.failure_severity || 'CRITICAL,HIGH' }}
fail_on_error: ${{ inputs.fail_on_error || true }}
@@ -45,7 +45,7 @@ jobs:
secrets: inherit
needs: [build-publish-docker-aws]
build-publish-docker-azure:
- uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v2
+ uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-validate-image.yaml@v3
with:
failure_severity: ${{ inputs.failure_severity || 'CRITICAL,HIGH' }}
fail_on_error: ${{ inputs.fail_on_error || true }}
diff --git a/.github/workflows/vulnerability-scan-failure-notify.yaml b/.github/workflows/vulnerability-scan-failure-notify.yaml
new file mode 100644
index 000000000..7a87e06fc
--- /dev/null
+++ b/.github/workflows/vulnerability-scan-failure-notify.yaml
@@ -0,0 +1,24 @@
+name: Vulnerability Scan Failure Slack Notify
+on:
+ workflow_dispatch:
+ inputs:
+ vulnerability_severity:
+ description: The severity to fail the workflow if such vulnerability is detected. DO NOT override it unless a Jira ticket is raised. DO NOT use 'CRITICAL' unless a Jira ticket is raised.
+ type: choice
+ options:
+ - CRITICAL,HIGH
+ - CRITICAL,HIGH,MEDIUM
+ - CRITICAL
+ default: 'CRITICAL,HIGH'
+ schedule:
+ - cron: '0 16 * * *' # 9:00 AM GMT -7
+ - cron: '0 0 * * *' # 5:00 PM GMT -7
+
+jobs:
+ vulnerability-scan-failure-notify:
+ uses: IABTechLab/uid2-shared-actions/.github/workflows/shared-vulnerability-scan-failure-notify.yaml@v3
+ secrets:
+      SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
+ with:
+      scan_type: image
+ java_version: "21"
diff --git a/.trivyignore b/.trivyignore
index 3aa85f54a..dcb1e8d4b 100644
--- a/.trivyignore
+++ b/.trivyignore
@@ -1,5 +1,12 @@
# List any vulnerability that are to be accepted
# See https://aquasecurity.github.io/trivy/v0.35/docs/vulnerability/examples/filter/
# for more details
-# e.g.
-# CVE-2022-3996
+
+# https://thetradedesk.atlassian.net/browse/UID2-4460
+CVE-2024-47535
+
+# https://thetradedesk.atlassian.net/browse/UID2-4874
+CVE-2025-24970 exp:2025-04-03
+
+# https://thetradedesk.atlassian.net/browse/UID2-5186
+CVE-2024-8176 exp:2025-04-03
diff --git a/Dockerfile b/Dockerfile
index c698202c2..db73539e2 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
-# sha from https://hub.docker.com/layers/amd64/eclipse-temurin/21.0.4_7-jre-alpine/images/sha256-8179ddc8a6c5ac9af935020628763b9a5a671e0914976715d2b61b21881cefca
-FROM eclipse-temurin@sha256:8179ddc8a6c5ac9af935020628763b9a5a671e0914976715d2b61b21881cefca
+# sha from https://hub.docker.com/layers/amd64/eclipse-temurin/21.0.6_7-jre-alpine/images/sha256-f184bb601f9e6068dd0a92738764d1ff447ab68c15ddbf8c303c5c29de9a1df8
+FROM eclipse-temurin@sha256:f184bb601f9e6068dd0a92738764d1ff447ab68c15ddbf8c303c5c29de9a1df8
WORKDIR /app
EXPOSE 8080
@@ -7,19 +7,17 @@ EXPOSE 8080
ARG JAR_NAME=uid2-operator
ARG JAR_VERSION=1.0.0-SNAPSHOT
ARG IMAGE_VERSION=1.0.0.unknownhash
-ARG EXTRA_CONFIG
ENV JAR_NAME=${JAR_NAME}
ENV JAR_VERSION=${JAR_VERSION}
ENV IMAGE_VERSION=${IMAGE_VERSION}
ENV REGION=us-east-2
-ENV LOKI_HOSTNAME=loki
-ENV LOGBACK_CONF=${LOGBACK_CONF:-./conf/logback.xml}
COPY ./target/${JAR_NAME}-${JAR_VERSION}-jar-with-dependencies.jar /app/${JAR_NAME}-${JAR_VERSION}.jar
COPY ./target/${JAR_NAME}-${JAR_VERSION}-sources.jar /app
COPY ./target/${JAR_NAME}-${JAR_VERSION}-static.tar.gz /app/static.tar.gz
-COPY ./conf/default-config.json ${EXTRA_CONFIG} /app/conf/
+COPY ./conf/default-config.json /app/conf/
COPY ./conf/*.xml /app/conf/
+COPY ./conf/runtime-config-defaults.json /app/conf/
RUN tar xzvf /app/static.tar.gz --no-same-owner --no-same-permissions && rm -f /app/static.tar.gz
@@ -30,5 +28,5 @@ CMD java \
-XX:MaxRAMPercentage=95 -XX:-UseCompressedOops -XX:+PrintFlagsFinal -XX:-OmitStackTraceInFastThrow \
-Djava.security.egd=file:/dev/./urandom \
-Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory \
- -Dlogback.configurationFile=${LOGBACK_CONF} \
+ -Dlogback.configurationFile=/app/conf/logback.xml \
-jar ${JAR_NAME}-${JAR_VERSION}.jar
diff --git a/Makefile.eif b/Makefile.eif
index 395685024..38e47c13c 100644
--- a/Makefile.eif
+++ b/Makefile.eif
@@ -13,23 +13,14 @@ all: build_eif
build_eif: uid2operator.eif euidoperator.eif
-uid2operator.eif: build_artifacts build_configs build/proxies.nitro.yaml build/syslog-ng-client.conf build/syslog-ng-core_4.6.0-1_amd64.deb build/syslog-ng-ose-pub.asc build/entrypoint.sh build/vsockpx build/Dockerfile build/load_config.py build/make_config.py
- cd build; docker build -t uid2operator . --build-arg JAR_VERSION=`cat package.version` --build-arg IMAGE_VERSION=`cat package.version`-`git show --format="%h" --no-patch`; docker save -o ./uid2operator.tar uid2operator; docker cp ./uid2operator.tar amazonlinux:/uid2operator.tar
+uid2operator.eif: build_artifacts build_configs build/proxies.nitro.yaml build/syslog-ng-client.conf build/syslog-ng-core_4.6.0-1_amd64.deb build/syslog-ng-ose-pub.asc build/entrypoint.sh build/vsockpx build/Dockerfile
+ cd build; docker build -t uid2operator . --build-arg JAR_VERSION=`cat package.version` --build-arg IMAGE_VERSION=`cat package.version`-`git show --format="%h" --no-patch`; docker save -o ./uid2operator.tar uid2operator; docker cp ./uid2operator.tar amazonlinux:/uid2operator.tar; rm -f ./uid2operator.tar
docker exec amazonlinux bash aws_nitro_eif.sh uid2operator
-euidoperator.eif: build_artifacts build_configs build/proxies.nitro.yaml build/syslog-ng-client.conf build/syslog-ng-core_4.6.0-1_amd64.deb build/syslog-ng-ose-pub.asc build/entrypoint.sh build/vsockpx build/Dockerfile build/load_config.py build/make_config.py
- cd build; docker build -t euidoperator . --build-arg IDENTITY_SCOPE='EUID' --build-arg JAR_VERSION=`cat package.version` --build-arg IMAGE_VERSION=`cat package.version`-`git show --format="%h" --no-patch`; docker save -o ./euidoperator.tar euidoperator; docker cp ./euidoperator.tar amazonlinux:/euidoperator.tar
+euidoperator.eif: build_artifacts build_configs build/proxies.nitro.yaml build/syslog-ng-client.conf build/syslog-ng-core_4.6.0-1_amd64.deb build/syslog-ng-ose-pub.asc build/entrypoint.sh build/vsockpx build/Dockerfile
+ cd build; docker build -t euidoperator . --build-arg IDENTITY_SCOPE='EUID' --build-arg JAR_VERSION=`cat package.version` --build-arg IMAGE_VERSION=`cat package.version`-`git show --format="%h" --no-patch`; docker save -o ./euidoperator.tar euidoperator; docker cp ./euidoperator.tar amazonlinux:/euidoperator.tar; rm -f ./euidoperator.tar
docker exec amazonlinux bash aws_nitro_eif.sh euidoperator
-##################################################################################################################################################################
-
-# Config scripts
-
-build/load_config.py: ./scripts/aws/load_config.py
- cp ./scripts/aws/load_config.py ./build/
-
-build/make_config.py: ./scripts/aws/make_config.py
- cp ./scripts/aws/make_config.py ./build/
##################################################################################################################################################################
@@ -37,26 +28,29 @@ build/make_config.py: ./scripts/aws/make_config.py
.PHONY: build_configs
-build_configs: build/conf/default-config.json build/conf/prod-uid2-config.json build/conf/integ-uid2-config.json build/conf/prod-euid-config.json build/conf/integ-euid-config.json build/conf/logback.xml
+build_configs: build/conf/default-config.json build/conf/euid-integ-config.json build/conf/euid-prod-config.json build/conf/uid2-integ-config.json build/conf/uid2-prod-config.json build/conf/logback.xml build/conf/logback-debug.xml
build/conf/default-config.json: build_artifacts ./scripts/aws/conf/default-config.json
cp ./scripts/aws/conf/default-config.json ./build/conf/
-build/conf/prod-uid2-config.json: build_artifacts ./scripts/aws/conf/prod-uid2-config.json
- cp ./scripts/aws/conf/prod-uid2-config.json ./build/conf/
+build/conf/euid-integ-config.json: build_artifacts ./scripts/aws/conf/euid-integ-config.json
+ cp ./scripts/aws/conf/euid-integ-config.json ./build/conf/
-build/conf/prod-euid-config.json: build_artifacts ./scripts/aws/conf/prod-euid-config.json
- cp ./scripts/aws/conf/prod-euid-config.json ./build/conf/
+build/conf/euid-prod-config.json: build_artifacts ./scripts/aws/conf/euid-prod-config.json
+ cp ./scripts/aws/conf/euid-prod-config.json ./build/conf/
-build/conf/integ-uid2-config.json: build_artifacts ./scripts/aws/conf/integ-uid2-config.json
- cp ./scripts/aws/conf/integ-uid2-config.json ./build/conf/
+build/conf/uid2-integ-config.json: build_artifacts ./scripts/aws/conf/uid2-integ-config.json
+ cp ./scripts/aws/conf/uid2-integ-config.json ./build/conf/
-build/conf/integ-euid-config.json: build_artifacts ./scripts/aws/conf/integ-euid-config.json
- cp ./scripts/aws/conf/integ-euid-config.json ./build/conf/
+build/conf/uid2-prod-config.json: build_artifacts ./scripts/aws/conf/uid2-prod-config.json
+ cp ./scripts/aws/conf/uid2-prod-config.json ./build/conf/
build/conf/logback.xml: build_artifacts ./scripts/aws/conf/logback.xml
cp ./scripts/aws/conf/logback.xml ./build/conf/
+build/conf/logback-debug.xml: build_artifacts ./scripts/aws/conf/logback-debug.xml
+ cp ./scripts/aws/conf/logback-debug.xml ./build/conf/
+
build/Dockerfile: build_artifacts ./scripts/aws/Dockerfile
cp ./scripts/aws/Dockerfile ./build/
diff --git a/conf/default-config.json b/conf/default-config.json
index 224df8906..0683103a5 100644
--- a/conf/default-config.json
+++ b/conf/default-config.json
@@ -30,11 +30,14 @@
"salts_metadata_path": "salts/metadata.json",
"services_metadata_path": "services/metadata.json",
"service_links_metadata_path": "service_links/metadata.json",
+ "cloud_encryption_keys_metadata_path": "cloud_encryption_keys/metadata.json",
+ "encrypted_files": false,
+ "cloud_encryption_keys_refresh_ms": 300000,
"optout_metadata_path": null,
"optout_inmem_cache": false,
"enclave_platform": null,
"failure_shutdown_wait_hours": 120,
"sharing_token_expiry_seconds": 2592000,
- "operator_type": "public"
-
+ "operator_type": "public",
+ "enable_remote_config": false
}
diff --git a/conf/docker-config.json b/conf/docker-config.json
index 648b922a8..6c376b5ea 100644
--- a/conf/docker-config.json
+++ b/conf/docker-config.json
@@ -4,7 +4,6 @@
"storage_mock": true,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
"refresh_token_v3": false,
"identity_v3": false,
"identity_scope": "uid2",
@@ -32,12 +31,23 @@
"salts_metadata_path": "/com.uid2.core/test/salts/metadata.json",
"services_metadata_path": "/com.uid2.core/test/services/metadata.json",
"service_links_metadata_path": "/com.uid2.core/test/service_links/metadata.json",
+ "cloud_encryption_keys_metadata_path": "/com.uid2.core/test/cloud_encryption_keys/metadata.json",
+ "encrypted_files": true,
"identity_token_expires_after_seconds": 3600,
"optout_metadata_path": null,
"optout_inmem_cache": false,
"enclave_platform": null,
"failure_shutdown_wait_hours": 120,
"salts_expired_shutdown_hours": 12,
- "operator_type": "public"
-
+ "operator_type": "public",
+ "runtime_config_store": {
+ "type": "file",
+ "config" : {
+ "path": "conf/runtime-config-defaults.json",
+ "format": "json"
+ },
+ "config_scan_period_ms": 5000
+ },
+ "disable_optout_token": false,
+ "enable_remote_config": false
}
diff --git a/conf/integ-config.json b/conf/integ-config.json
index f1cf90742..b741cf2a3 100644
--- a/conf/integ-config.json
+++ b/conf/integ-config.json
@@ -13,7 +13,16 @@
"core_api_token": "trusted-partner-key",
"optout_api_token": "test-operator-key",
"optout_api_uri": "http://localhost:8081/optout/replicate",
+ "cloud_encryption_keys_metadata_path": "http://localhost:8088/cloud_encryption_keys/retrieve",
"salts_expired_shutdown_hours": 12,
- "operator_type": "public"
-
+ "operator_type": "public",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "http://localhost:8088/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ },
+ "disable_optout_token": false,
+ "enable_remote_config": false
}
\ No newline at end of file
diff --git a/conf/local-config.json b/conf/local-config.json
index f19a4357d..7c61e2cad 100644
--- a/conf/local-config.json
+++ b/conf/local-config.json
@@ -9,12 +9,10 @@
"salts_metadata_path": "/com.uid2.core/test/salts/metadata.json",
"services_metadata_path": "/com.uid2.core/test/services/metadata.json",
"service_links_metadata_path": "/com.uid2.core/test/service_links/metadata.json",
+ "cloud_encryption_keys_metadata_path": "/com.uid2.core/test/cloud_encryption_keys/metadata.json",
"identity_token_expires_after_seconds": 3600,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
- "advertising_token_v4_percentage": 0,
- "site_ids_using_v4_tokens": "",
"refresh_token_v3": false,
"identity_v3": false,
"identity_scope": "uid2",
@@ -39,5 +37,16 @@
"key_sharing_endpoint_provide_app_names": true,
"client_side_token_generate_log_invalid_http_origins": true,
"salts_expired_shutdown_hours": 12,
- "operator_type": "public"
+ "operator_type": "public",
+ "encrypted_files": true,
+ "runtime_config_store": {
+ "type": "file",
+ "config" : {
+ "path": "conf/runtime-config-defaults.json",
+ "format": "json"
+ },
+ "config_scan_period_ms": 5000
+ },
+ "disable_optout_token": false,
+ "enable_remote_config": false
}
diff --git a/conf/local-e2e-docker-private-config.json b/conf/local-e2e-docker-private-config.json
index ef05b8772..87e5cd7a8 100644
--- a/conf/local-e2e-docker-private-config.json
+++ b/conf/local-e2e-docker-private-config.json
@@ -11,10 +11,11 @@
"keysets_metadata_path": "http://core:8088/key/keyset/refresh",
"keyset_keys_metadata_path": "http://core:8088/key/keyset-keys/refresh",
"salts_metadata_path": "http://core:8088/salt/refresh",
+ "cloud_encryption_keys_metadata_path": "http://core:8088/cloud_encryption_keys/retrieve",
+ "encrypted_files": false,
"identity_token_expires_after_seconds": 3600,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
"refresh_token_v3": true,
"identity_v3": false,
"identity_scope": "uid2",
@@ -28,5 +29,13 @@
"optout_delta_rotate_interval": 60,
"cloud_refresh_interval": 30,
"salts_expired_shutdown_hours": 12,
- "operator_type": "private"
+ "operator_type": "private",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "http://core:8088/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ },
+ "enable_remote_config": false
}
diff --git a/conf/local-e2e-docker-public-config.json b/conf/local-e2e-docker-public-config.json
index 60f0abd92..60d5e287a 100644
--- a/conf/local-e2e-docker-public-config.json
+++ b/conf/local-e2e-docker-public-config.json
@@ -13,10 +13,11 @@
"salts_metadata_path": "http://core:8088/salt/refresh",
"services_metadata_path": "http://core:8088/services/refresh",
"service_links_metadata_path": "http://core:8088/service_links/refresh",
+ "cloud_encryption_keys_metadata_path": "http://core:8088/cloud_encryption_keys/retrieve",
+ "encrypted_files": false,
"identity_token_expires_after_seconds": 3600,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
"refresh_token_v3": true,
"identity_v3": false,
"identity_scope": "uid2",
@@ -34,6 +35,14 @@
"optout_status_api_enabled": true,
"cloud_refresh_interval": 30,
"salts_expired_shutdown_hours": 12,
- "operator_type": "public"
-
+ "operator_type": "public",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "http://core:8088/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ },
+ "disable_optout_token": false,
+ "enable_remote_config": false
}
diff --git a/conf/local-e2e-private-config.json b/conf/local-e2e-private-config.json
index e9d3f8b53..06b1ddb3a 100644
--- a/conf/local-e2e-private-config.json
+++ b/conf/local-e2e-private-config.json
@@ -13,10 +13,11 @@
"salts_metadata_path": "http://localhost:8088/salt/refresh",
"services_metadata_path": "http://localhost:8088/services/refresh",
"service_links_metadata_path": "http://localhost:8088/service_links/refresh",
+ "cloud_encryption_keys_metadata_path": "http://localhost:8088/cloud_encryption_keys/retrieve",
+ "encrypted_files": false,
"identity_token_expires_after_seconds": 3600,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
"refresh_token_v3": true,
"identity_v3": false,
"identity_scope": "uid2",
@@ -39,6 +40,13 @@
"client_side_token_generate_domain_name_check_enabled": false,
"client_side_token_generate_log_invalid_http_origins": true,
"salts_expired_shutdown_hours": 12,
- "operator_type": "private"
-
+ "operator_type": "private",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "http://localhost:8088/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ },
+ "enable_remote_config": false
}
diff --git a/conf/local-e2e-public-config.json b/conf/local-e2e-public-config.json
index cb635b103..6b5e0fc03 100644
--- a/conf/local-e2e-public-config.json
+++ b/conf/local-e2e-public-config.json
@@ -13,10 +13,11 @@
"salts_metadata_path": "http://localhost:8088/salt/refresh",
"services_metadata_path": "http://localhost:8088/services/refresh",
"service_links_metadata_path": "http://localhost:8088/service_links/refresh",
+ "cloud_encryption_keys_metadata_path": "http://localhost:8088/cloud_encryption_keys/retrieve",
+ "encrypted_files": false,
"identity_token_expires_after_seconds": 3600,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
"refresh_token_v3": true,
"identity_v3": false,
"identity_scope": "uid2",
@@ -40,6 +41,14 @@
"key_sharing_endpoint_provide_app_names": true,
"client_side_token_generate_log_invalid_http_origins": true,
"salts_expired_shutdown_hours": 12,
- "operator_type": "public"
-
+ "operator_type": "public",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "http://localhost:8088/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ },
+ "disable_optout_token": false,
+ "enable_remote_config": false
}
diff --git a/conf/logback.loki-local.xml b/conf/logback.loki-local.xml
deleted file mode 100644
index ff0f0adb1..000000000
--- a/conf/logback.loki-local.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-
-
-
- http://localhost:3100/loki/api/v1/push
-
-
-
-
- l=%level h=${HOSTNAME} po=${port_offset:-0} c=%logger{20} t=%thread | %msg %ex
-
- true
-
-
-
-
-
-
- %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg %ex%n
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/conf/logback.loki.xml b/conf/logback.loki.xml
deleted file mode 100644
index d2358c272..000000000
--- a/conf/logback.loki.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-
-
-
- http://${LOKI_HOSTNAME}:3100/loki/api/v1/push
-
-
-
-
- l=%level h=${HOSTNAME} po=${port_offset:-0} c=%logger{20} t=%thread | %msg %ex
-
- true
-
-
-
-
-
-
- %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg %ex%n
-
-
-
-
-
-
-
-
-
diff --git a/conf/runtime-config-defaults.json b/conf/runtime-config-defaults.json
new file mode 100644
index 000000000..817d714dd
--- /dev/null
+++ b/conf/runtime-config-defaults.json
@@ -0,0 +1,6 @@
+{
+ "identity_token_expires_after_seconds": 3600,
+ "refresh_token_expires_after_seconds": 86400,
+ "refresh_identity_token_after_seconds": 900,
+ "sharing_token_expiry_seconds": 2592000
+}
\ No newline at end of file
diff --git a/conf/validator-latest-e2e-docker-public-config.json b/conf/validator-latest-e2e-docker-public-config.json
index cabf23380..38b6c2b28 100644
--- a/conf/validator-latest-e2e-docker-public-config.json
+++ b/conf/validator-latest-e2e-docker-public-config.json
@@ -14,10 +14,11 @@
"salts_metadata_path": "http://core:8088/salt/refresh",
"services_metadata_path": "http://core:8088/services/refresh",
"service_links_metadata_path": "http://core:8088/service_links/refresh",
+ "cloud_encryption_keys_metadata_path": "http://core:8088/cloud_encryption_keys/retrieve",
+ "encrypted_files": true,
"identity_token_expires_after_seconds": 3600,
"refresh_token_expires_after_seconds": 86400,
"refresh_identity_token_after_seconds": 900,
- "advertising_token_v3": false,
"refresh_token_v3": true,
"identity_v3": false,
"identity_scope": "uid2",
@@ -33,6 +34,14 @@
"optout_api_uri": "http://optout:8081/optout/replicate",
"optout_delta_rotate_interval": 60,
"cloud_refresh_interval": 30,
- "operator_type": "public"
-
+ "operator_type": "public",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "http://core:8088/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ },
+ "disable_optout_token": false,
+ "enable_remote_config": false
}
diff --git a/pom.xml b/pom.xml
index 082c67876..15e312bcf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -6,11 +6,11 @@
com.uid2
uid2-operator
- 5.40.86
-
+ 5.50.36
+
UTF-8
- 4.5.3
+ 4.5.11
1.0.22
5.11.2
5.11.2
@@ -20,9 +20,9 @@
1.12.2
2.1.6
2.1.0
- 2.1.0
+ 2.1.13
2.1.0
- 7.19.0
+ 9.0.8
${project.version}
21
21
@@ -162,11 +162,6 @@
logback-classic
1.5.8
-
- com.github.loki4j
- loki-logback-appender
- 1.5.2
-
net.logstash.logback
logstash-logback-encoder
diff --git a/scripts/aws/Dockerfile b/scripts/aws/Dockerfile
index e210001c3..67aa17368 100644
--- a/scripts/aws/Dockerfile
+++ b/scripts/aws/Dockerfile
@@ -31,16 +31,12 @@ COPY ./target/${JAR_NAME}-${JAR_VERSION}-jar-with-dependencies.jar /app/${JAR_NA
COPY ./static /app/static
COPY ./libjnsm.so /app/lib/
COPY ./vsockpx /app/
-COPY ./make_config.py /app/
COPY ./entrypoint.sh /app/
COPY ./proxies.nitro.yaml /app/
-COPY ./conf/default-config.json /app/conf/
-COPY ./conf/prod-uid2-config.json /app/conf/
-COPY ./conf/integ-uid2-config.json /app/conf/
-COPY ./conf/prod-euid-config.json /app/conf/
-COPY ./conf/integ-euid-config.json /app/conf/
-COPY ./conf/*.xml /app/conf/
-COPY ./syslog-ng-client.conf /etc/syslog-ng/syslog-ng.conf
+COPY ./conf/default-config.json /app/conf/
+COPY ./conf/*.json /app/conf/
+COPY ./conf/*.xml /app/conf/
+COPY ./syslog-ng-client.conf /etc/syslog-ng/syslog-ng.conf
RUN chmod +x /app/vsockpx && chmod +x /app/entrypoint.sh
diff --git a/scripts/aws/EUID_CloudFormation.template.yml b/scripts/aws/EUID_CloudFormation.template.yml
index 9c5982488..72dd5141c 100644
--- a/scripts/aws/EUID_CloudFormation.template.yml
+++ b/scripts/aws/EUID_CloudFormation.template.yml
@@ -118,6 +118,10 @@ Mappings:
AMI: ami-xxxxxxxxxxxxxxxxx
eu-north-1:
AMI: ami-xxxxxxxxxxxxxxxxx
+Conditions:
+ IsIntegEnvironment: !Equals
+ - !Ref DeployToEnvironment
+ - integ
Resources:
KMSKey:
Type: AWS::KMS::Key
@@ -154,13 +158,23 @@ Resources:
Description: EUID Token
KmsKeyId: !GetAtt KMSKey.Arn
Name: !Sub 'euid-config-stack-${AWS::StackName}'
- SecretString: !Sub '{
- "api_token":"${APIToken}",
- "service_instances":6,
- "enclave_cpu_count":6,
- "enclave_memory_mb":24576,
- "environment":"${DeployToEnvironment}"
- }'
+ SecretString: !Join
+ - ''
+ - - '{'
+ - '"core_base_url": "'
+ - !If [IsIntegEnvironment, 'https://core.integ.euid.eu', 'https://core.prod.euid.eu']
+ - '", "optout_base_url": "'
+ - !If [IsIntegEnvironment, 'https://optout.integ.euid.eu', 'https://optout.prod.euid.eu']
+ - '", "operator_key": "'
+ - Ref: APIToken
+ - '"'
+ - ', "service_instances": 6'
+ - ', "enclave_cpu_count": 6'
+ - ', "enclave_memory_mb": 24576'
+ - ', "environment": "'
+ - Ref: DeployToEnvironment
+ - '"'
+ - '}'
WorkerRole:
Type: 'AWS::IAM::Role'
Properties:
diff --git a/scripts/aws/UID_CloudFormation.template.yml b/scripts/aws/UID_CloudFormation.template.yml
index 711d1ab0e..e1431159e 100644
--- a/scripts/aws/UID_CloudFormation.template.yml
+++ b/scripts/aws/UID_CloudFormation.template.yml
@@ -146,6 +146,10 @@ Mappings:
AMI: ami-xxxxxxxxxxxxxxxxx
af-south-1:
AMI: ami-xxxxxxxxxxxxxxxxx
+Conditions:
+ IsIntegEnvironment: !Equals
+ - !Ref DeployToEnvironment
+ - integ
Resources:
KMSKey:
Type: AWS::KMS::Key
@@ -182,13 +186,23 @@ Resources:
Description: UID2 Token
KmsKeyId: !GetAtt KMSKey.Arn
Name: !Sub 'uid2-config-stack-${AWS::StackName}'
- SecretString: !Sub '{
- "api_token":"${APIToken}",
- "service_instances":6,
- "enclave_cpu_count":6,
- "enclave_memory_mb":24576,
- "environment":"${DeployToEnvironment}"
- }'
+ SecretString: !Join
+ - ''
+ - - '{'
+ - '"core_base_url": "'
+ - !If [IsIntegEnvironment, 'https://core-integ.uidapi.com', 'https://core-prod.uidapi.com']
+ - '", "optout_base_url": "'
+ - !If [IsIntegEnvironment, 'https://optout-integ.uidapi.com', 'https://optout-prod.uidapi.com']
+ - '", "operator_key": "'
+ - Ref: APIToken
+ - '"'
+ - ', "service_instances": 6'
+ - ', "enclave_cpu_count": 6'
+ - ', "enclave_memory_mb": 24576'
+ - ', "environment": "'
+ - Ref: DeployToEnvironment
+ - '"'
+ - '}'
WorkerRole:
Type: 'AWS::IAM::Role'
Properties:
diff --git a/scripts/aws/conf/default-config.json b/scripts/aws/conf/default-config.json
index 6db89fd29..8f4477336 100644
--- a/scripts/aws/conf/default-config.json
+++ b/scripts/aws/conf/default-config.json
@@ -30,11 +30,12 @@
"service_links_metadata_path": "service_links/metadata.json",
"optout_metadata_path": null,
"optout_inmem_cache": false,
- "enclave_platform": null,
+ "enclave_platform": "aws-nitro",
"failure_shutdown_wait_hours": 120,
"sharing_token_expiry_seconds": 2592000,
"validate_service_links": false,
- "advertising_token_v4_percentage": 100,
- "site_ids_using_v4_tokens": "",
+ "identity_token_expires_after_seconds": 86400,
+ "refresh_token_expires_after_seconds": 2592000,
+ "refresh_identity_token_after_seconds": 3600,
"operator_type": "private"
-}
+}
\ No newline at end of file
diff --git a/scripts/aws/conf/integ-euid-config.json b/scripts/aws/conf/euid-integ-config.json
similarity index 84%
rename from scripts/aws/conf/integ-euid-config.json
rename to scripts/aws/conf/euid-integ-config.json
index 45d3dbe94..0944b74e9 100644
--- a/scripts/aws/conf/integ-euid-config.json
+++ b/scripts/aws/conf/euid-integ-config.json
@@ -10,6 +10,8 @@
"optout_metadata_path": "https://optout.integ.euid.eu/optout/refresh",
"core_attest_url": "https://core.integ.euid.eu/attest",
"optout_api_uri": "https://optout.integ.euid.eu/optout/replicate",
+ "cloud_encryption_keys_metadata_path": "https://core.integ.euid.eu/cloud_encryption_keys/retrieve",
"optout_s3_folder": "optout/",
- "allow_legacy_api": false
-}
+ "allow_legacy_api": false,
+ "identity_scope": "euid"
+}
\ No newline at end of file
diff --git a/scripts/aws/conf/prod-euid-config.json b/scripts/aws/conf/euid-prod-config.json
similarity index 80%
rename from scripts/aws/conf/prod-euid-config.json
rename to scripts/aws/conf/euid-prod-config.json
index c7784a381..e09b202f3 100644
--- a/scripts/aws/conf/prod-euid-config.json
+++ b/scripts/aws/conf/euid-prod-config.json
@@ -10,6 +10,7 @@
"service_links_metadata_path": "https://core.prod.euid.eu/service_links/refresh",
"optout_metadata_path": "https://optout.prod.euid.eu/optout/refresh",
"core_attest_url": "https://core.prod.euid.eu/attest",
+ "cloud_encryption_keys_metadata_path": "https://core.prod.euid.eu/cloud_encryption_keys/retrieve",
"core_api_token": "your-api-token",
"optout_s3_path_compat": false,
"optout_api_uri": "https://optout.prod.euid.eu/optout/replicate",
@@ -24,9 +25,15 @@
"refresh_identity_token_after_seconds": 3600,
"allow_legacy_api": false,
"identity_scope": "euid",
- "advertising_token_v3": true,
"refresh_token_v3": true,
- "enable_phone_support": false,
+ "enable_phone_support": true,
"enable_v1_phone_support": false,
- "enable_v2_encryption": true
-}
+ "enable_v2_encryption": true,
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "https://core.prod.euid.eu/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ }
+}
\ No newline at end of file
diff --git a/scripts/aws/conf/logback-debug.xml b/scripts/aws/conf/logback-debug.xml
new file mode 100644
index 000000000..c012f8d25
--- /dev/null
+++ b/scripts/aws/conf/logback-debug.xml
@@ -0,0 +1,15 @@
+
+
+
+
+
+
+ REDACTED - S3
+ \S+s3\.amazonaws\.com\/\S*X-Amz-Security-Token=\S+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/scripts/aws/conf/integ-uid2-config.json b/scripts/aws/conf/uid2-integ-config.json
similarity index 87%
rename from scripts/aws/conf/integ-uid2-config.json
rename to scripts/aws/conf/uid2-integ-config.json
index a7272a26a..3c267a655 100644
--- a/scripts/aws/conf/integ-uid2-config.json
+++ b/scripts/aws/conf/uid2-integ-config.json
@@ -1,15 +1,16 @@
{
+ "core_attest_url": "https://core-integ.uidapi.com/attest",
+ "optout_api_uri": "https://optout-integ.uidapi.com/optout/replicate",
"sites_metadata_path": "https://core-integ.uidapi.com/sites/refresh",
"clients_metadata_path": "https://core-integ.uidapi.com/clients/refresh",
+ "client_side_keypairs_metadata_path": "https://core-integ.uidapi.com/client_side_keypairs/refresh",
"keysets_metadata_path": "https://core-integ.uidapi.com/key/keyset/refresh",
"keyset_keys_metadata_path": "https://core-integ.uidapi.com/key/keyset-keys/refresh",
- "client_side_keypairs_metadata_path": "https://core-integ.uidapi.com/client_side_keypairs/refresh",
"salts_metadata_path": "https://core-integ.uidapi.com/salt/refresh",
"services_metadata_path": "https://core-integ.uidapi.com/services/refresh",
"service_links_metadata_path": "https://core-integ.uidapi.com/service_links/refresh",
"optout_metadata_path": "https://optout-integ.uidapi.com/optout/refresh",
- "core_attest_url": "https://core-integ.uidapi.com/attest",
- "optout_api_uri": "https://optout-integ.uidapi.com/optout/replicate",
+ "cloud_encryption_keys_metadata_path": "https://core-integ.uidapi.com/cloud_encryption_keys/retrieve",
"optout_s3_folder": "uid-optout-integ/",
- "allow_legacy_api": false
+ "identity_scope": "uid2"
}
diff --git a/scripts/aws/conf/prod-uid2-config.json b/scripts/aws/conf/uid2-prod-config.json
similarity index 79%
rename from scripts/aws/conf/prod-uid2-config.json
rename to scripts/aws/conf/uid2-prod-config.json
index 5da450033..e143f098e 100644
--- a/scripts/aws/conf/prod-uid2-config.json
+++ b/scripts/aws/conf/uid2-prod-config.json
@@ -10,6 +10,7 @@
"service_links_metadata_path": "https://core-prod.uidapi.com/service_links/refresh",
"optout_metadata_path": "https://optout-prod.uidapi.com/optout/refresh",
"core_attest_url": "https://core-prod.uidapi.com/attest",
+ "cloud_encryption_keys_metadata_path": "https://core-prod.uidapi.com/cloud_encryption_keys/retrieve",
"core_api_token": "your-api-token",
"optout_s3_path_compat": false,
"optout_api_uri": "https://optout-prod.uidapi.com/optout/replicate",
@@ -19,8 +20,16 @@
"optout_synthetic_logs_count": 0,
"optout_inmem_cache": true,
"optout_s3_folder": "optout-v2/",
+ "identity_scope": "uid2",
"identity_token_expires_after_seconds": 259200,
"refresh_token_expires_after_seconds": 2592000,
"refresh_identity_token_after_seconds": 3600,
- "allow_legacy_api": false
-}
+ "allow_legacy_api": false,
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "https://core-prod.uidapi.com/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ }
+}
\ No newline at end of file
diff --git a/scripts/aws/config-server/app.py b/scripts/aws/config-server/app.py
index edb80e4d5..c0c94fc63 100644
--- a/scripts/aws/config-server/app.py
+++ b/scripts/aws/config-server/app.py
@@ -10,25 +10,6 @@ def get_config():
with open('/etc/secret/secret-value/config', 'r') as secret_file:
secret_value = secret_file.read().strip()
secret_value_json = json.loads(secret_value)
- secret_value_json["environment"] = secret_value_json["environment"].lower()
- if "core_base_url" in secret_value_json:
- secret_value_json["core_base_url"] = secret_value_json["core_base_url"].lower()
- if "optout_base_url" in secret_value_json:
- secret_value_json["optout_base_url"] = secret_value_json["optout_base_url"].lower()
- if "operator_type" in secret_value_json and secret_value_json["operator_type"].lower() == "public":
- mount_path = '/etc/config/config-values'
- if os.path.exists(mount_path):
- config_keys = [f for f in os.listdir(mount_path) if os.path.isfile(os.path.join(mount_path, f))]
- config = {}
- for k in config_keys:
- with open(os.path.join(mount_path, k), 'r') as value:
- config[k] = value.read()
- try:
- json.loads(config[k])
- config[k] = json.loads(config[k])
- except Exception:
- pass
- secret_value_json.update(config)
return json.dumps(secret_value_json)
except Exception as e:
return str(e), 500
diff --git a/scripts/aws/config-server/requirements.txt b/scripts/aws/config-server/requirements.txt
index 57652a258..8cdd5ef92 100644
--- a/scripts/aws/config-server/requirements.txt
+++ b/scripts/aws/config-server/requirements.txt
@@ -1,3 +1,3 @@
Flask==2.3.2
Werkzeug==3.0.3
-setuptools==70.0.0
+setuptools==70.0.0
\ No newline at end of file
diff --git a/scripts/aws/ec2.py b/scripts/aws/ec2.py
new file mode 100644
index 000000000..a972a290b
--- /dev/null
+++ b/scripts/aws/ec2.py
@@ -0,0 +1,305 @@
+#!/usr/bin/env python3
+
+import boto3
+import json
+import os
+import subprocess
+import re
+import multiprocessing
+import requests
+import signal
+import argparse
+import logging
+from botocore.exceptions import ClientError, NoCredentialsError
+from typing import Dict, List
+import sys
+import time
+import yaml
+logging.basicConfig(level=logging.INFO)
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from confidential_compute import ConfidentialCompute, ConfidentialComputeConfig, InstanceProfileMissingError, OperatorKeyNotFoundError, ConfigurationValueError, ConfidentialComputeStartupError
+
+class AWSConfidentialComputeConfig(ConfidentialComputeConfig):
+ enclave_memory_mb: int
+ enclave_cpu_count: int
+ core_api_token: str
+ optout_api_token: str
+
+class AuxiliaryConfig:
+ FLASK_PORT: str = "27015"
+ LOCALHOST: str = "127.0.0.1"
+ AWS_METADATA: str = "169.254.169.254"
+
+ @classmethod
+ def get_socks_url(cls) -> str:
+ return f"socks5://{cls.LOCALHOST}:3306"
+
+ @classmethod
+ def get_config_url(cls) -> str:
+ return f"http://{cls.LOCALHOST}:{cls.FLASK_PORT}/getConfig"
+
+ @classmethod
+ def get_user_data_url(cls) -> str:
+ return f"http://{cls.AWS_METADATA}/latest/user-data"
+
+ @classmethod
+ def get_token_url(cls) -> str:
+ return f"http://{cls.AWS_METADATA}/latest/api/token"
+
+ @classmethod
+ def get_meta_url(cls) -> str:
+ return f"http://{cls.AWS_METADATA}/latest/dynamic/instance-identity/document"
+
+
+class EC2EntryPoint(ConfidentialCompute):
+
+ def __init__(self):
+ super().__init__()
+
+ def __get_aws_token(self) -> str:
+ """Fetches a temporary AWS EC2 metadata token."""
+ try:
+ response = requests.put(
+ AuxiliaryConfig.get_token_url(), headers={"X-aws-ec2-metadata-token-ttl-seconds": "3600"}, timeout=2
+ )
+ return response.text
+ except requests.RequestException as e:
+ raise RuntimeError(f"Failed to fetch AWS token: {e}")
+
+ def __get_current_region(self) -> str:
+ """Fetches the current AWS region from EC2 instance metadata."""
+ token = self.__get_aws_token()
+ headers = {"X-aws-ec2-metadata-token": token}
+ try:
+ response = requests.get(AuxiliaryConfig.get_meta_url(), headers=headers, timeout=2)
+ response.raise_for_status()
+ return response.json()["region"]
+ except requests.RequestException as e:
+ raise RuntimeError(f"Failed to fetch region: {e}")
+
+ def __validate_aws_specific_config(self):
+ if "enclave_memory_mb" in self.configs or "enclave_cpu_count" in self.configs:
+ max_capacity = self.__get_max_capacity()
+ if self.configs.get('enclave_memory_mb') < 11000 or self.configs.get('enclave_memory_mb') > max_capacity.get('enclave_memory_mb'):
+ raise ConfigurationValueError(self.__class__.__name__, f"enclave_memory_mb must be in range 11000 and {max_capacity.get('enclave_memory_mb')}")
+ if self.configs.get('enclave_cpu_count') < 2 or self.configs.get('enclave_cpu_count') > max_capacity.get('enclave_cpu_count'):
+ raise ConfigurationValueError(self.__class__.__name__, f"enclave_cpu_count must be in range 2 and {max_capacity.get('enclave_cpu_count')}")
+
+ def _set_confidential_config(self, secret_identifier: str) -> None:
+ """Fetches a secret value from AWS Secrets Manager and adds defaults"""
+
+ def add_defaults(configs: Dict[str, any]) -> AWSConfidentialComputeConfig:
+ """Adds default values to configuration if missing. Sets operator_key if only api_token is specified for backward compatibility """
+ default_capacity = self.__get_max_capacity()
+ configs.setdefault("operator_key", configs.get("api_token"))
+ configs.setdefault("enclave_memory_mb", default_capacity["enclave_memory_mb"])
+ configs.setdefault("enclave_cpu_count", default_capacity["enclave_cpu_count"])
+ configs.setdefault("debug_mode", False)
+ configs.setdefault("core_api_token", configs.get("operator_key"))
+ configs.setdefault("optout_api_token", configs.get("operator_key"))
+ return configs
+
+ region = self.__get_current_region()
+ logging.info(f"Running in {region}")
+ client = boto3.client("secretsmanager", region_name=region)
+ try:
+ self.configs = add_defaults(json.loads(client.get_secret_value(SecretId=secret_identifier)["SecretString"]))
+ self.__validate_aws_specific_config()
+ except json.JSONDecodeError as e:
+ raise OperatorKeyNotFoundError(self.__class__.__name__, f"Can not parse secret {secret_identifier} in {region}")
+ except NoCredentialsError as _:
+ raise InstanceProfileMissingError(self.__class__.__name__)
+ except ClientError as _:
+ raise OperatorKeyNotFoundError(self.__class__.__name__, f"Secret Manager {secret_identifier} in {region}")
+
+ @staticmethod
+ def __get_max_capacity():
+ try:
+ with open("/etc/nitro_enclaves/allocator.yaml", "r") as file:
+ nitro_config = yaml.safe_load(file)
+ return {"enclave_memory_mb": nitro_config['memory_mib'], "enclave_cpu_count": nitro_config['cpu_count']}
+ except Exception as e:
+ raise RuntimeError("/etc/nitro_enclaves/allocator.yaml does not have CPU, memory allocated")
+
+ def __setup_vsockproxy(self) -> None:
+ logging.info("Sets up the vSock proxy service")
+ thread_count = (multiprocessing.cpu_count() + 1) // 2
+ command = [
+ "/usr/bin/vsockpx", "-c", "/etc/uid2operator/proxy.yaml",
+ "--workers", str(thread_count), "--daemon"
+ ]
+
+ debug_command = [
+ "/usr/bin/vsockpx", "-c", "/etc/uid2operator/proxy.yaml",
+ "--workers", str(thread_count), "--log-level", "0"
+ ]
+
+ self.run_service([command, debug_command], "vsock_proxy")
+
+ def __run_config_server(self) -> None:
+ logging.info("Starts the Flask configuration server")
+ os.makedirs("/etc/secret/secret-value", exist_ok=True)
+ config_path = "/etc/secret/secret-value/config"
+
+ # Save configs to a file
+ with open(config_path, 'w') as config_file:
+ json.dump(self.configs, config_file)
+
+ os.chdir("/opt/uid2operator/config-server")
+ command = ["./bin/flask", "run", "--host", AuxiliaryConfig.LOCALHOST, "--port", AuxiliaryConfig.FLASK_PORT]
+
+ self.run_service([command, command], "flask_config_server", separate_process=True)
+
+ def __run_socks_proxy(self) -> None:
+ logging.info("Starts the SOCKS proxy service")
+ command = ["sockd", "-D"]
+
+ # -d specifies debug level
+ debug_command = ["sockd", "-d", "0"]
+
+ self.run_service([command, debug_command], "socks_proxy")
+
+ def run_service(self, command: List[List[str]], log_filename: str, separate_process: bool = False) -> None:
+ """
+ Runs a service command with logging if debug_mode is enabled.
+
+ :param command: command[0] regular command, command[1] debug mode command
+ :param log_filename: Base name of the log file (e.g., "flask_config_server", "socks_proxy", "vsock_proxy")
+ :param separate_process: Whether to run in a separate process
+ """
+ log_file = f"/var/log/{log_filename}.log"
+
+ if self.configs.get("debug_mode") is True:
+
+ # Remove old log file to start fresh
+ if os.path.exists(log_file):
+ os.remove(log_file)
+
+ # Set up logging
+ logging.basicConfig(
+ filename=log_file,
+ filemode="w",
+ level=logging.DEBUG,
+ format="%(asctime)s %(levelname)s: %(message)s"
+ )
+
+ logging.info(f"Debug mode is on, logging into {log_file}")
+
+ # Run debug mode command
+ with open(log_file, "a") as log:
+ self.run_command(command[1], separate_process=True, stdout=log, stderr=log)
+ else:
+ # Run regular command, possibly daemon
+ self.run_command(command[0], separate_process=separate_process)
+
+ def __get_secret_name_from_userdata(self) -> str:
+ """Extracts the secret name from EC2 user data."""
+ logging.info("Extracts the secret name from EC2 user data")
+ token = self.__get_aws_token()
+ response = requests.get(AuxiliaryConfig.get_user_data_url(), headers={"X-aws-ec2-metadata-token": token})
+ user_data = response.text
+
+ with open("/opt/uid2operator/identity_scope.txt") as file:
+ identity_scope = file.read().strip()
+
+ default_name = f"{identity_scope.lower()}-operator-config-key"
+ hardcoded_value = f"{identity_scope.upper()}_CONFIG_SECRET_KEY"
+ match = re.search(rf'^export {hardcoded_value}="(.+?)"$', user_data, re.MULTILINE)
+ return match.group(1) if match else default_name
+
+ def _setup_auxiliaries(self) -> None:
+ """Sets up the vsock tunnel, socks proxy and flask server"""
+ self.__setup_vsockproxy()
+ self.__run_config_server()
+ self.__run_socks_proxy()
+ logging.info("Finished setting up all auxiliaries")
+
+ def _validate_auxiliaries(self) -> None:
+ """Validates connection to flask server direct and through socks proxy."""
+ logging.info("Validating auxiliaries")
+ try:
+ for attempt in range(10):
+ try:
+ response = requests.get(AuxiliaryConfig.get_config_url())
+ logging.info("Config server is reachable")
+ break
+ except requests.exceptions.ConnectionError as e:
+ logging.error(f"Connecting to config server, attempt {attempt + 1} failed with ConnectionError: {e}")
+ time.sleep(1)
+ else:
+ raise RuntimeError(f"Config server unreachable")
+ response.raise_for_status()
+ except requests.RequestException as e:
+ raise RuntimeError(f"Failed to get config from config server: {e}")
+ proxies = {"http": AuxiliaryConfig.get_socks_url(), "https": AuxiliaryConfig.get_socks_url()}
+ try:
+ response = requests.get(AuxiliaryConfig.get_config_url(), proxies=proxies)
+ response.raise_for_status()
+ except requests.RequestException as e:
+ raise RuntimeError(f"Cannot connect to config server via SOCKS proxy: {e}")
+ logging.info("Connectivity check to config server passes")
+
+ def __run_nitro_enclave(self):
+ command = [
+ "nitro-cli", "run-enclave",
+ "--eif-path", "/opt/uid2operator/uid2operator.eif",
+ "--memory", str(self.configs["enclave_memory_mb"]),
+ "--cpu-count", str(self.configs["enclave_cpu_count"]),
+ "--enclave-cid", "42",
+ "--enclave-name", "uid2operator"
+ ]
+ if self.configs.get('debug_mode', False):
+ logging.info("Running nitro in debug_mode")
+ command += ["--debug-mode", "--attach-console"]
+ self.run_command(command, separate_process=False)
+
+ def run_compute(self) -> None:
+ """Main execution flow for confidential compute."""
+ secret_manager_key = self.__get_secret_name_from_userdata()
+ self._set_confidential_config(secret_manager_key)
+ logging.info(f"Fetched configs from {secret_manager_key}")
+ if not self.configs.get("skip_validations"):
+ self.validate_configuration()
+ self._setup_auxiliaries()
+ self._validate_auxiliaries()
+ self.__run_nitro_enclave()
+
+ def cleanup(self) -> None:
+ """Terminates the Nitro Enclave and auxiliary processes."""
+ try:
+ self.run_command(["nitro-cli", "terminate-enclave", "--all"])
+ self.__kill_auxiliaries()
+ except subprocess.SubprocessError as e:
+ raise RuntimeError(f"Error during cleanup: {e}")
+
+ def __kill_auxiliaries(self) -> None:
+ """Kills all auxiliary processes spawned."""
+ for process_name in ["vsockpx", "sockd", "flask"]:
+ try:
+ result = subprocess.run(["pgrep", "-f", process_name], stdout=subprocess.PIPE, text=True, check=False)
+ if result.stdout.strip():
+ for pid in result.stdout.strip().split("\n"):
+ os.kill(int(pid), signal.SIGKILL)
+ logging.info(f"Killed process '{process_name}'.")
+ else:
+ logging.info(f"No process named '{process_name}' found.")
+ except Exception as e:
+ logging.error(f"Error killing process '{process_name}': {e}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Manage EC2-based confidential compute workflows.")
+ parser.add_argument("-o", "--operation", choices=["stop", "start"], default="start", help="Operation to perform.")
+ args = parser.parse_args()
+
+ try:
+ ec2 = EC2EntryPoint()
+ if args.operation == "stop":
+ ec2.cleanup()
+ else:
+ ec2.run_compute()
+ except ConfidentialComputeStartupError as e:
+ logging.error(f"Failed starting up Confidential Compute. Please check the logs for errors and retry. {e}")
+ except Exception as e:
+ logging.error(f"Unexpected failure while starting up Confidential Compute. Please contact UID support team with this log {e}")
+
diff --git a/scripts/aws/eks-pod/entrypoint.sh b/scripts/aws/eks-pod/entrypoint.sh
index c506d6cbf..2dc0483e2 100644
--- a/scripts/aws/eks-pod/entrypoint.sh
+++ b/scripts/aws/eks-pod/entrypoint.sh
@@ -3,6 +3,7 @@ CID=42
EIF_PATH=/home/uid2operator.eif
MEMORY_MB=24576
CPU_COUNT=6
+DEBUG_MODE="false"
set -x
@@ -26,7 +27,7 @@ function setup_vsockproxy() {
echo "setup_vsockproxy"
VSOCK_PROXY=${VSOCK_PROXY:-/home/vsockpx}
VSOCK_CONFIG=${VSOCK_CONFIG:-/home/proxies.host.yaml}
- VSOCK_THREADS=${VSOCK_THREADS:-$(( $(nproc) * 2 )) }
+ VSOCK_THREADS=${VSOCK_THREADS:-$(( ( $(nproc) + 1 ) / 2 )) }
VSOCK_LOG_LEVEL=${VSOCK_LOG_LEVEL:-3}
echo "starting vsock proxy at $VSOCK_PROXY with $VSOCK_THREADS worker threads..."
$VSOCK_PROXY -c $VSOCK_CONFIG --workers $VSOCK_THREADS --log-level $VSOCK_LOG_LEVEL --daemon
@@ -87,12 +88,20 @@ function update_config() {
{ set +x; } 2>/dev/null; { CPU_COUNT=$(echo $IDENTITY_SERVICE_CONFIG | jq -r '.enclave_cpu_count'); set -x; }
{ set +x; } 2>/dev/null; { MEMORY_MB=$(echo $IDENTITY_SERVICE_CONFIG | jq -r '.enclave_memory_mb'); set -x; }
fi
+
+ { set +x; } 2>/dev/null; { DEBUG_MODE=$(echo $IDENTITY_SERVICE_CONFIG | jq -r '.debug_mode'); set -x; }
+
shopt -u nocasematch
}
function run_enclave() {
- echo "starting enclave... --cpu-count $CPU_COUNT --memory $MEMORY_MB --eif-path $EIF_PATH --enclave-cid $CID"
- nitro-cli run-enclave --cpu-count $CPU_COUNT --memory $MEMORY_MB --eif-path $EIF_PATH --enclave-cid $CID --enclave-name uid2-operator
+ if [ "$DEBUG_MODE" == "true" ]; then
+ echo "starting enclave... --cpu-count $CPU_COUNT --memory $MEMORY_MB --eif-path $EIF_PATH --enclave-cid $CID --debug-mode --attach-console"
+ nitro-cli run-enclave --cpu-count $CPU_COUNT --memory $MEMORY_MB --eif-path $EIF_PATH --enclave-cid $CID --enclave-name uid2-operator --debug-mode --attach-console
+ else
+ echo "starting enclave... --cpu-count $CPU_COUNT --memory $MEMORY_MB --eif-path $EIF_PATH --enclave-cid $CID"
+ nitro-cli run-enclave --cpu-count $CPU_COUNT --memory $MEMORY_MB --eif-path $EIF_PATH --enclave-cid $CID --enclave-name uid2-operator
+ fi
}
echo "starting ..."
diff --git a/scripts/aws/entrypoint.sh b/scripts/aws/entrypoint.sh
index 32db563fa..c4edba6b1 100755
--- a/scripts/aws/entrypoint.sh
+++ b/scripts/aws/entrypoint.sh
@@ -5,8 +5,10 @@
LOG_FILE="/home/start.txt"
set -x
-exec > $LOG_FILE
-exec 2>&1
+exec &> >(tee -a "$LOG_FILE")
+
+PARAMETERIZED_CONFIG="/app/conf/config-overrides.json"
+OPERATOR_CONFIG="/tmp/final-config.json"
set -o pipefail
ulimit -n 65536
@@ -14,80 +16,74 @@ ulimit -n 65536
# -- setup loopback device
echo "Setting up loopback device..."
ifconfig lo 127.0.0.1
+/usr/sbin/syslog-ng --verbose
# -- start vsock proxy
echo "Starting vsock proxy..."
-/app/vsockpx --config /app/proxies.nitro.yaml --daemon --workers $(( $(nproc) * 2 )) --log-level 3
-
-# -- setup syslog-ng
-echo "Starting syslog-ng..."
-/usr/sbin/syslog-ng --verbose
-
-# -- load config from identity service
-echo "Loading config from identity service via proxy..."
-
-#wait for config service, then download config
-OVERRIDES_CONFIG="/app/conf/config-overrides.json"
-
-RETRY_COUNT=0
-MAX_RETRY=20
-until curl -s -f -o "${OVERRIDES_CONFIG}" -x socks5h://127.0.0.1:3305 http://127.0.0.1:27015/getConfig
-do
- echo "Waiting for config service to be available"
- RETRY_COUNT=$(( RETRY_COUNT + 1))
- if [ $RETRY_COUNT -gt $MAX_RETRY ]; then
- echo "Config Server did not return a response. Exiting"
+/app/vsockpx --config /app/proxies.nitro.yaml --daemon --workers $(( ( $(nproc) + 3 ) / 4 )) --log-level 3
+
+build_parameterized_config() {
+ curl -s -f -o "${PARAMETERIZED_CONFIG}" -x socks5h://127.0.0.1:3305 http://127.0.0.1:27015/getConfig
+ REQUIRED_KEYS=("optout_base_url" "core_base_url" "core_api_token" "optout_api_token" "environment")
+ for key in "${REQUIRED_KEYS[@]}"; do
+ if ! jq -e "has(\"${key}\")" "${PARAMETERIZED_CONFIG}" > /dev/null; then
+ echo "Error: Key '${key}' is missing. Please add it to flask config server"
+ exit 1
+ fi
+ done
+ FILTER=$(printf '. | {')
+ for key in "${REQUIRED_KEYS[@]}"; do
+ FILTER+="$key: .${key}, "
+ done
+ FILTER+="debug_mode: .debug_mode, "
+ FILTER=${FILTER%, }'}'
+ jq "${FILTER}" "${PARAMETERIZED_CONFIG}" > "${PARAMETERIZED_CONFIG}.tmp" && mv "${PARAMETERIZED_CONFIG}.tmp" "${PARAMETERIZED_CONFIG}"
+}
+
+build_operator_config() {
+ CORE_BASE_URL=$(jq -r ".core_base_url" < "${PARAMETERIZED_CONFIG}")
+ OPTOUT_BASE_URL=$(jq -r ".optout_base_url" < "${PARAMETERIZED_CONFIG}")
+ DEPLOYMENT_ENVIRONMENT=$(jq -r ".environment" < "${PARAMETERIZED_CONFIG}")
+ DEBUG_MODE=$(jq -r ".debug_mode" < "${PARAMETERIZED_CONFIG}")
+
+ IDENTITY_SCOPE_LOWER=$(echo "${IDENTITY_SCOPE}" | tr '[:upper:]' '[:lower:]')
+ DEPLOYMENT_ENVIRONMENT_LOWER=$(echo "${DEPLOYMENT_ENVIRONMENT}" | tr '[:upper:]' '[:lower:]')
+ DEFAULT_CONFIG="/app/conf/${IDENTITY_SCOPE_LOWER}-${DEPLOYMENT_ENVIRONMENT_LOWER}-config.json"
+
+ jq -s '.[0] * .[1]' "${DEFAULT_CONFIG}" "${PARAMETERIZED_CONFIG}" > "${OPERATOR_CONFIG}"
+
+ if [[ "$DEPLOYMENT_ENVIRONMENT" == "prod" ]]; then
+ if [[ "$DEBUG_MODE" == "true" ]]; then
+ echo "Cannot run in DEBUG_MODE in production environment. Exiting."
exit 1
+ fi
fi
- sleep 2
-done
-
-# check the config is valid. Querying for a known missing element (empty) makes jq parse the file, but does not echo the results
-if jq empty "${OVERRIDES_CONFIG}"; then
- echo "Identity service returned valid config"
-else
- echo "Failed to get a valid config from identity service"
- exit 1
-fi
-export DEPLOYMENT_ENVIRONMENT=$(jq -r ".environment" < "${OVERRIDES_CONFIG}")
-export CORE_BASE_URL=$(jq -r ".core_base_url" < "${OVERRIDES_CONFIG}")
-export OPTOUT_BASE_URL=$(jq -r ".optout_base_url" < "${OVERRIDES_CONFIG}")
-echo "DEPLOYMENT_ENVIRONMENT=${DEPLOYMENT_ENVIRONMENT}"
-if [ -z "${DEPLOYMENT_ENVIRONMENT}" ]; then
- echo "DEPLOYMENT_ENVIRONMENT cannot be empty"
- exit 1
-fi
-if [ "${DEPLOYMENT_ENVIRONMENT}" != "prod" ] && [ "${DEPLOYMENT_ENVIRONMENT}" != "integ" ]; then
- echo "Unrecognized DEPLOYMENT_ENVIRONMENT ${DEPLOYMENT_ENVIRONMENT}"
- exit 1
-fi
+ #TODO: Remove below logic after remote config management is implemented
+
+ if [[ "$DEPLOYMENT_ENVIRONMENT" != "prod" ]]; then
+ #Allow override of base URL in non-prod environments
+ CORE_PATTERN="https://core.*uidapi.com"
+ OPTOUT_PATTERN="https://optout.*uidapi.com"
+ if [[ "$IDENTITY_SCOPE_LOWER" == "euid" ]]; then
+ CORE_PATTERN="https://core.*euid.eu"
+ OPTOUT_PATTERN="https://optout.*euid.eu"
+ fi
+ sed -i "s#${CORE_PATTERN}#${CORE_BASE_URL}#g" "${OPERATOR_CONFIG}"
+ sed -i "s#${OPTOUT_PATTERN}#${OPTOUT_BASE_URL}#g" "${OPERATOR_CONFIG}"
+ fi
+
+}
-echo "Loading config final..."
-export FINAL_CONFIG="/app/conf/config-final.json"
-if [ "${IDENTITY_SCOPE}" = "UID2" ]; then
- python3 /app/make_config.py /app/conf/prod-uid2-config.json /app/conf/integ-uid2-config.json ${OVERRIDES_CONFIG} "$(nproc)" > ${FINAL_CONFIG}
-elif [ "${IDENTITY_SCOPE}" = "EUID" ]; then
- python3 /app/make_config.py /app/conf/prod-euid-config.json /app/conf/integ-euid-config.json ${OVERRIDES_CONFIG} "$(nproc)" > ${FINAL_CONFIG}
-else
- echo "Unrecognized IDENTITY_SCOPE ${IDENTITY_SCOPE}"
- exit 1
-fi
+build_parameterized_config
+build_operator_config
-# -- replace base URLs if both CORE_BASE_URL and OPTOUT_BASE_URL are provided
-# -- using hardcoded domains is fine because they should not be changed frequently
-if [ -n "${CORE_BASE_URL}" ] && [ "${CORE_BASE_URL}" != "null" ] && [ -n "${OPTOUT_BASE_URL}" ] && [ "${OPTOUT_BASE_URL}" != "null" ] && [ "${DEPLOYMENT_ENVIRONMENT}" != "prod" ]; then
- echo "Replacing core and optout URLs by ${CORE_BASE_URL} and ${OPTOUT_BASE_URL}..."
- sed -i "s#https://core-integ.uidapi.com#${CORE_BASE_URL}#g" "${FINAL_CONFIG}"
- sed -i "s#https://core-prod.uidapi.com#${CORE_BASE_URL}#g" "${FINAL_CONFIG}"
- sed -i "s#https://core.integ.euid.eu#${CORE_BASE_URL}#g" "${FINAL_CONFIG}"
- sed -i "s#https://core.prod.euid.eu#${CORE_BASE_URL}#g" "${FINAL_CONFIG}"
+DEBUG_MODE=$(jq -r ".debug_mode" < "${OPERATOR_CONFIG}")
+LOGBACK_CONF="./conf/logback.xml"
- sed -i "s#https://optout-integ.uidapi.com#${OPTOUT_BASE_URL}#g" "${FINAL_CONFIG}"
- sed -i "s#https://optout-prod.uidapi.com#${OPTOUT_BASE_URL}#g" "${FINAL_CONFIG}"
- sed -i "s#https://optout.integ.euid.eu#${OPTOUT_BASE_URL}#g" "${FINAL_CONFIG}"
- sed -i "s#https://optout.prod.euid.eu#${OPTOUT_BASE_URL}#g" "${FINAL_CONFIG}"
+if [[ "$DEBUG_MODE" == "true" ]]; then
+ LOGBACK_CONF="./conf/logback-debug.xml"
fi
# -- set pwd to /app so we can find default configs
@@ -95,12 +91,14 @@ cd /app
# -- start operator
echo "Starting Java application..."
+
java \
-XX:MaxRAMPercentage=95 -XX:-UseCompressedOops -XX:+PrintFlagsFinal \
-Djava.security.egd=file:/dev/./urandom \
-Djava.library.path=/app/lib \
- -Dvertx-config-path="${FINAL_CONFIG}" \
+ -Dvertx-config-path="${OPERATOR_CONFIG}" \
-Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory \
- -Dlogback.configurationFile=./conf/logback.xml \
+ -Dlogback.configurationFile=${LOGBACK_CONF} \
-Dhttp_proxy=socks5://127.0.0.1:3305 \
-jar /app/"${JAR_NAME}"-"${JAR_VERSION}".jar
+
diff --git a/scripts/aws/load_config.py b/scripts/aws/load_config.py
deleted file mode 100644
index 9f0446a49..000000000
--- a/scripts/aws/load_config.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-import boto3
-import base64
-import json
-from botocore.exceptions import ClientError
-
-secret_name = os.environ['UID2_CONFIG_SECRET_KEY']
-region_name = os.environ['AWS_REGION_NAME']
-aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID']
-secret_key = os.environ['AWS_SECRET_KEY']
-session_token = os.environ['AWS_SESSION_TOKEN']
-
-def get_secret():
- session = boto3.session.Session()
- client = session.client(
- service_name='secretsmanager',
- region_name=region_name,
- aws_access_key_id = aws_access_key_id,
- aws_secret_access_key = secret_key,
- aws_session_token = session_token
- )
- try:
- get_secret_value_response = client.get_secret_value(
- SecretId=secret_name
- )
- except ClientError as e:
- raise e
- else:
- if 'SecretString' in get_secret_value_response:
- secret = get_secret_value_response['SecretString']
- else:
- decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
-
- return secret
-
-def get_config():
- result = get_secret()
- conf = json.loads(result)
- print(result)
-
-get_config()
diff --git a/scripts/aws/make_config.py b/scripts/aws/make_config.py
deleted file mode 100644
index 5777dce61..000000000
--- a/scripts/aws/make_config.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import json
-import sys
-
-
-def load_json(path):
- with open(path, 'r') as f:
- return json.load(f)
-
-
-def apply_override(config, overrides, key, type):
- value = overrides.get(key)
- if value is not None:
- config[key] = type(value)
-
-
-config_path = sys.argv[1]
-integ_config_path = sys.argv[2]
-overrides_path = sys.argv[3]
-thread_count = int(sys.argv[4])
-
-config = load_json(config_path)
-overrides = load_json(overrides_path)
-
-# set API key
-config['core_api_token'] = overrides['api_token']
-config['optout_api_token'] = overrides['api_token']
-
-# number of threads
-config['service_instances'] = thread_count
-
-# environment
-if overrides.get('environment') == 'integ':
- integ_config = load_json(integ_config_path)
- apply_override(config, integ_config, 'sites_metadata_path', str)
- apply_override(config, integ_config, 'clients_metadata_path', str)
- apply_override(config, integ_config, 'keysets_metadata_path', str)
- apply_override(config, integ_config, 'keyset_keys_metadata_path', str)
- apply_override(config, integ_config, 'client_side_keypairs_metadata_path', str)
- apply_override(config, integ_config, 'salts_metadata_path', str)
- apply_override(config, integ_config, 'services_metadata_path', str)
- apply_override(config, integ_config, 'service_links_metadata_path', str)
- apply_override(config, integ_config, 'optout_metadata_path', str)
- apply_override(config, integ_config, 'core_attest_url', str)
- apply_override(config, integ_config, 'optout_api_uri', str)
- apply_override(config, integ_config, 'optout_s3_folder', str)
-
-
-apply_override(config, overrides, 'operator_type', str)
-if 'operator_type' in config and config['operator_type'] == 'public':
- config.update(overrides)
-else:
- # allowed overrides
- apply_override(config, overrides, 'loki_enabled', bool)
- apply_override(config, overrides, 'optout_synthetic_logs_enabled', bool)
- apply_override(config, overrides, 'optout_synthetic_logs_count', int)
-
-print(json.dumps(config))
diff --git a/scripts/aws/pipeline/amazonlinux2023.Dockerfile b/scripts/aws/pipeline/amazonlinux2023.Dockerfile
index 2914c9ee3..79bcd66df 100644
--- a/scripts/aws/pipeline/amazonlinux2023.Dockerfile
+++ b/scripts/aws/pipeline/amazonlinux2023.Dockerfile
@@ -4,8 +4,9 @@ FROM amazonlinux:2023
RUN dnf update -y
# systemd is not a hard requirement for Amazon ECS Anywhere, but the installation script currently only supports systemd to run.
# Amazon ECS Anywhere can be used without systemd, if you set up your nodes and register them into your ECS cluster **without** the installation script.
-RUN dnf -y groupinstall "Development Tools"
-RUN dnf -y install systemd vim-common wget git tar libstdc++-static.x86_64 cmake cmake3 aws-nitro-enclaves-cli aws-nitro-enclaves-cli-devel
+RUN dnf -y groupinstall "Development Tools" \
+ && dnf -y install systemd vim-common wget git tar libstdc++-static.x86_64 cmake cmake3 aws-nitro-enclaves-cli aws-nitro-enclaves-cli-devel \
+ && dnf clean all
RUN systemctl enable docker
@@ -14,12 +15,14 @@ RUN wget https://www.inet.no/dante/files/dante-1.4.3.tar.gz \
&& sha256sum --check dante_checksum \
&& tar -xf dante-1.4.3.tar.gz \
&& cd dante-1.4.3; ./configure; make; cd .. \
- && cp dante-1.4.3/sockd/sockd ./
+ && cp dante-1.4.3/sockd/sockd ./ \
+ && rm -rf dante-1.4.3 dante-1.4.3.tar.gz
RUN git clone https://github.com/IABTechLab/uid2-aws-enclave-vsockproxy.git \
&& mkdir uid2-aws-enclave-vsockproxy/build \
&& cd uid2-aws-enclave-vsockproxy/build; cmake .. -DCMAKE_BUILD_TYPE=RelWithDebInfo; make; cd ../.. \
- && cp uid2-aws-enclave-vsockproxy/build/vsock-bridge/src/vsock-bridge ./vsockpx
+ && cp uid2-aws-enclave-vsockproxy/build/vsock-bridge/src/vsock-bridge ./vsockpx \
+ && rm -rf uid2-aws-enclave-vsockproxy
COPY ./scripts/aws/pipeline/aws_nitro_eif.sh /aws_nitro_eif.sh
diff --git a/scripts/aws/pipeline/aws_nitro_eif.sh b/scripts/aws/pipeline/aws_nitro_eif.sh
index 2d8f0216b..904d3f3ea 100644
--- a/scripts/aws/pipeline/aws_nitro_eif.sh
+++ b/scripts/aws/pipeline/aws_nitro_eif.sh
@@ -10,5 +10,6 @@ while (! docker stats --no-stream >/dev/null 2>&1); do
sleep 1
done
docker load -i $1.tar
+rm -f $1.tar
nitro-cli build-enclave --docker-uri $1 --output-file $1.eif
nitro-cli describe-eif --eif-path $1.eif | jq -r '.Measurements.PCR0' | xxd -r -p | base64 > pcr0.txt
diff --git a/scripts/aws/requirements.txt b/scripts/aws/requirements.txt
new file mode 100644
index 000000000..421faba98
--- /dev/null
+++ b/scripts/aws/requirements.txt
@@ -0,0 +1,4 @@
+requests[socks]==2.32.3
+boto3==1.35.59
+urllib3==1.26.20
+PyYAML==6.0.2
\ No newline at end of file
diff --git a/scripts/aws/sockd.conf b/scripts/aws/sockd.conf
index 6e8814445..1f903407c 100644
--- a/scripts/aws/sockd.conf
+++ b/scripts/aws/sockd.conf
@@ -3,10 +3,11 @@ external: ens5
user.notprivileged: ec2-user
clientmethod: none
socksmethod: none
+logoutput: stderr
client pass {
from: 127.0.0.1/32 to: 127.0.0.1/32
- log: error # connect disconnect iooperation
+ log: error connect # disconnect iooperation
}
socks pass {
diff --git a/scripts/aws/start.sh b/scripts/aws/start.sh
deleted file mode 100644
index 440ae58d7..000000000
--- a/scripts/aws/start.sh
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/bin/bash
-
-echo "$HOSTNAME" > /etc/uid2operator/HOSTNAME
-EIF_PATH=${EIF_PATH:-/opt/uid2operator/uid2operator.eif}
-IDENTITY_SCOPE=${IDENTITY_SCOPE:-$(cat /opt/uid2operator/identity_scope.txt)}
-CID=${CID:-42}
-TOKEN=$(curl --request PUT "http://169.254.169.254/latest/api/token" --header "X-aws-ec2-metadata-token-ttl-seconds: 3600")
-USER_DATA=$(curl -s http://169.254.169.254/latest/user-data --header "X-aws-ec2-metadata-token: $TOKEN")
-AWS_REGION_NAME=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document/ --header "X-aws-ec2-metadata-token: $TOKEN" | jq -r '.region')
-if [ "$IDENTITY_SCOPE" = 'UID2' ]; then
- UID2_CONFIG_SECRET_KEY=$([[ "$(echo "${USER_DATA}" | grep UID2_CONFIG_SECRET_KEY=)" =~ ^export\ UID2_CONFIG_SECRET_KEY=\"(.*)\"$ ]] && echo "${BASH_REMATCH[1]}" || echo "uid2-operator-config-key")
-elif [ "$IDENTITY_SCOPE" = 'EUID' ]; then
- UID2_CONFIG_SECRET_KEY=$([[ "$(echo "${USER_DATA}" | grep EUID_CONFIG_SECRET_KEY=)" =~ ^export\ EUID_CONFIG_SECRET_KEY=\"(.*)\"$ ]] && echo "${BASH_REMATCH[1]}" || echo "euid-operator-config-key")
-else
- echo "Unrecognized IDENTITY_SCOPE $IDENTITY_SCOPE"
- exit 1
-fi
-CORE_BASE_URL=$([[ "$(echo "${USER_DATA}" | grep CORE_BASE_URL=)" =~ ^export\ CORE_BASE_URL=\"(.*)\"$ ]] && echo "${BASH_REMATCH[1]}" || echo "")
-OPTOUT_BASE_URL=$([[ "$(echo "${USER_DATA}" | grep OPTOUT_BASE_URL=)" =~ ^export\ OPTOUT_BASE_URL=\"(.*)\"$ ]] && echo "${BASH_REMATCH[1]}" || echo "")
-
-echo "UID2_CONFIG_SECRET_KEY=${UID2_CONFIG_SECRET_KEY}"
-echo "CORE_BASE_URL=${CORE_BASE_URL}"
-echo "OPTOUT_BASE_URL=${OPTOUT_BASE_URL}"
-echo "AWS_REGION_NAME=${AWS_REGION_NAME}"
-
-function terminate_old_enclave() {
- ENCLAVE_ID=$(nitro-cli describe-enclaves | jq -r ".[0].EnclaveID")
- [ "$ENCLAVE_ID" != "null" ] && nitro-cli terminate-enclave --enclave-id ${ENCLAVE_ID}
-}
-
-function config_aws() {
- aws configure set default.region $AWS_REGION_NAME
-}
-
-function default_cpu() {
- target=$(( $(nproc) * 3 / 4 ))
- if [ $target -lt 2 ]; then
- target="2"
- fi
- echo $target
-}
-
-function default_mem() {
- target=$(( $(grep MemTotal /proc/meminfo | awk '{print $2}') * 3 / 4000 ))
- if [ $target -lt 24576 ]; then
- target="24576"
- fi
- echo $target
-}
-
-function read_allocation() {
- USER_CUSTOMIZED=$(aws secretsmanager get-secret-value --secret-id "$UID2_CONFIG_SECRET_KEY" | jq -r '.SecretString' | jq -r '.customize_enclave')
- shopt -s nocasematch
- if [ "$USER_CUSTOMIZED" = "true" ]; then
- echo "Applying user customized CPU/Mem allocation..."
- CPU_COUNT=${CPU_COUNT:-$(aws secretsmanager get-secret-value --secret-id "$UID2_CONFIG_SECRET_KEY" | jq -r '.SecretString' | jq -r '.enclave_cpu_count')}
- MEMORY_MB=${MEMORY_MB:-$(aws secretsmanager get-secret-value --secret-id "$UID2_CONFIG_SECRET_KEY" | jq -r '.SecretString' | jq -r '.enclave_memory_mb')}
- else
- echo "Applying default CPU/Mem allocation..."
- CPU_COUNT=6
- MEMORY_MB=24576
- fi
- shopt -u nocasematch
-}
-
-
-function update_allocation() {
- ALLOCATOR_YAML=/etc/nitro_enclaves/allocator.yaml
- if [ -z "$CPU_COUNT" ] || [ -z "$MEMORY_MB" ]; then
- echo 'No CPU_COUNT or MEMORY_MB set, cannot start enclave'
- exit 1
- fi
- echo "updating allocator: CPU_COUNT=$CPU_COUNT, MEMORY_MB=$MEMORY_MB..."
- systemctl stop nitro-enclaves-allocator.service
- sed -r "s/^(\s*memory_mib\s*:\s*).*/\1$MEMORY_MB/" -i $ALLOCATOR_YAML
- sed -r "s/^(\s*cpu_count\s*:\s*).*/\1$CPU_COUNT/" -i $ALLOCATOR_YAML
- systemctl start nitro-enclaves-allocator.service && systemctl enable nitro-enclaves-allocator.service
- echo "nitro-enclaves-allocator restarted"
-}
-
-function setup_vsockproxy() {
- VSOCK_PROXY=${VSOCK_PROXY:-/usr/bin/vsockpx}
- VSOCK_CONFIG=${VSOCK_CONFIG:-/etc/uid2operator/proxy.yaml}
- VSOCK_THREADS=${VSOCK_THREADS:-$(( $(nproc) * 2 )) }
- VSOCK_LOG_LEVEL=${VSOCK_LOG_LEVEL:-3}
- echo "starting vsock proxy at $VSOCK_PROXY with $VSOCK_THREADS worker threads..."
- $VSOCK_PROXY -c $VSOCK_CONFIG --workers $VSOCK_THREADS --log-level $VSOCK_LOG_LEVEL --daemon
- echo "vsock proxy now running in background."
-}
-
-function setup_dante() {
- sockd -D
-}
-
-function run_config_server() {
- mkdir -p /etc/secret/secret-value
- {
- set +x; # Disable tracing within this block
- 2>/dev/null;
- SECRET_JSON=$(aws secretsmanager get-secret-value --secret-id "$UID2_CONFIG_SECRET_KEY" | jq -r '.SecretString')
- echo "${SECRET_JSON}" > /etc/secret/secret-value/config;
- }
- echo $(jq ".core_base_url = \"$CORE_BASE_URL\"" /etc/secret/secret-value/config) > /etc/secret/secret-value/config
- echo $(jq ".optout_base_url = \"$OPTOUT_BASE_URL\"" /etc/secret/secret-value/config) > /etc/secret/secret-value/config
- echo "run_config_server"
- cd /opt/uid2operator/config-server
- ./bin/flask run --host 127.0.0.1 --port 27015 &
-}
-
-function run_enclave() {
- echo "starting enclave..."
- nitro-cli run-enclave --eif-path $EIF_PATH --memory $MEMORY_MB --cpu-count $CPU_COUNT --enclave-cid $CID --enclave-name uid2operator
-}
-
-terminate_old_enclave
-config_aws
-read_allocation
-# update_allocation
-setup_vsockproxy
-setup_dante
-run_config_server
-run_enclave
-
-echo "Done!"
diff --git a/scripts/aws/stop.sh b/scripts/aws/stop.sh
deleted file mode 100644
index c37bdc729..000000000
--- a/scripts/aws/stop.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-function terminate_old_enclave() {
- echo "Terminating Enclave..."
- ENCLAVE_ID=$(nitro-cli describe-enclaves | jq -r ".[0].EnclaveID")
- if [ "$ENCLAVE_ID" != "null" ]; then
- nitro-cli terminate-enclave --enclave-id $ENCLAVE_ID
- else
- echo "no running enclaves to terminate"
- fi
-}
-
-function kill_process() {
- echo "Shutting down $1..."
- pid=$(pidof $1)
- if [ -z "$pid" ]; then
- echo "process $1 not found"
- else
- kill -9 $pid
- echo "$1 exited"
- fi
-}
-
-terminate_old_enclave
-kill_process vsockpx
-kill_process sockd
-# we start aws vsock-proxy via nohup
-kill_process vsock-proxy
-kill_process nohup
-
-echo "Done!"
diff --git a/scripts/aws/uid2-operator-ami/ansible/playbook.yml b/scripts/aws/uid2-operator-ami/ansible/playbook.yml
index 84c6c6f14..85f20e3ac 100644
--- a/scripts/aws/uid2-operator-ami/ansible/playbook.yml
+++ b/scripts/aws/uid2-operator-ami/ansible/playbook.yml
@@ -70,27 +70,34 @@
requirements: /opt/uid2operator/config-server/requirements.txt
virtualenv_command: 'python3 -m venv'
+ - name: Install requirements.txt for enclave init
+ ansible.builtin.copy:
+ src: /tmp/artifacts/requirements.txt
+ dest: /opt/uid2operator/requirements.txt
+ remote_src: yes
+
- name: Install starter script
ansible.builtin.copy:
- src: /tmp/artifacts/start.sh
- dest: /opt/uid2operator/start.sh
+ src: /tmp/artifacts/ec2.py
+ dest: /opt/uid2operator/ec2.py
remote_src: yes
- name: Make starter script executable
ansible.builtin.file:
- path: /opt/uid2operator/start.sh
+ path: /opt/uid2operator/ec2.py
mode: '0755'
- - name: Install stopper script
+ - name: Copy confidential_compute script
ansible.builtin.copy:
- src: /tmp/artifacts/stop.sh
- dest: /opt/uid2operator/stop.sh
+ src: /tmp/artifacts/confidential_compute.py
+ dest: /opt/uid2operator/confidential_compute.py
remote_src: yes
- - name: Make starter script executable
- ansible.builtin.file:
- path: /opt/uid2operator/stop.sh
- mode: '0755'
+ - name: Create virtualenv for eif init
+ ansible.builtin.pip:
+ virtualenv: /opt/uid2operator/init
+ requirements: /opt/uid2operator/requirements.txt
+ virtualenv_command: 'python3.11 -m venv'
- name: Install Operator EIF
ansible.builtin.copy:
diff --git a/scripts/aws/uid2operator.service b/scripts/aws/uid2operator.service
index 1d36b7a91..56559e3c2 100644
--- a/scripts/aws/uid2operator.service
+++ b/scripts/aws/uid2operator.service
@@ -8,8 +8,8 @@ RemainAfterExit=true
StandardOutput=journal
StandardError=journal
SyslogIdentifier=uid2operator
-ExecStart=/opt/uid2operator/start.sh
-ExecStop=/opt/uid2operator/stop.sh
+ExecStart=/opt/uid2operator/init/bin/python /opt/uid2operator/ec2.py
+ExecStop=/opt/uid2operator/init/bin/python /opt/uid2operator/ec2.py -o stop
[Install]
-WantedBy=multi-user.target
\ No newline at end of file
+WantedBy=multi-user.target
diff --git a/scripts/azure-aks/deployment/generate-deployment-artifacts.sh b/scripts/azure-aks/deployment/generate-deployment-artifacts.sh
new file mode 100644
index 000000000..7a3b7db10
--- /dev/null
+++ b/scripts/azure-aks/deployment/generate-deployment-artifacts.sh
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+set -x
+
+# Following environment variables must be set
+# - IMAGE: uid2-operator image
+# - OUTPUT_DIR: output directory to store the artifacts
+# - MANIFEST_DIR: output directory to store the manifest for the enclave Id
+# - VERSION_NUMBER: the version number of the build
+
+SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+INPUT_DIR=${SCRIPT_DIR}
+
+if [[ -z ${IMAGE} ]]; then
+ echo "IMAGE cannot be empty"
+ exit 1
+fi
+IMAGE_VERSION=$(echo $IMAGE | awk -F':' '{print $2}')
+if [[ -z ${IMAGE_VERSION} ]]; then
+ echo "Failed to extract image version from ${IMAGE}"
+ exit 1
+fi
+
+if [[ -z ${OUTPUT_DIR} ]]; then
+ echo "OUTPUT_DIR cannot be empty"
+ exit 1
+fi
+
+mkdir -p ${OUTPUT_DIR}
+if [[ $? -ne 0 ]]; then
+ echo "Failed to create ${OUTPUT_DIR}"
+ exit 1
+fi
+
+mkdir -p ${MANIFEST_DIR}
+if [[ $? -ne 0 ]]; then
+ echo "Failed to create ${MANIFEST_DIR}"
+ exit 1
+fi
+
+# Input files
+INPUT_FILES=(
+ operator.yaml
+)
+
+# Copy input files to output dir
+for f in ${INPUT_FILES[@]}; do
+ cp ${INPUT_DIR}/${f} ${OUTPUT_DIR}/${f}
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to copy ${INPUT_DIR}/${f} to ${OUTPUT_DIR}"
+ exit 1
+ fi
+done
+
+az version
+# Install confcom extension, az is originally available in GitHub workflow environment
+az extension add --name confcom
+if [[ $? -ne 0 ]]; then
+ echo "Failed to install Azure confcom extension"
+ exit 1
+fi
+
+# Required by az confcom
+sudo usermod -aG docker ${USER}
+if [[ $? -ne 0 ]]; then
+ echo "Failed to add current user to docker group"
+ exit 1
+fi
+
+# Generate operator template
+sed -i "s#IMAGE_PLACEHOLDER#${IMAGE}#g" ${OUTPUT_DIR}/operator.yaml
+# && \
+# sed -i "s#IMAGE_VERSION_PLACEHOLDER#${IMAGE_VERSION}#g" ${OUTPUT_DIR}/operator.yaml
+if [[ $? -ne 0 ]]; then
+ echo "Failed to pre-process operator template file"
+ exit 1
+fi
+
+# Export the policy, update it to turn off allow_environment_variable_dropping, and then insert it into the template
+# note that the EnclaveId is generated by generate.py on the raw policy, not the base64 version
+POLICY_DIGEST_FILE=azure-aks-operator-digest-$VERSION_NUMBER.txt
+az confcom acipolicygen --virtual-node-yaml ${OUTPUT_DIR}/operator.yaml --print-policy > ${INPUT_DIR}/policy.base64
+if [[ $? -ne 0 ]]; then
+ echo "Failed to generate ACI policy"
+ exit 1
+fi
+
+base64 -di < ${INPUT_DIR}/policy.base64 > ${INPUT_DIR}/generated.rego
+if [[ $? -ne 0 ]]; then
+ echo "Failed to base64-decode policy"
+ exit 1
+fi
+
+sed --in-place \
+ -e "s#allow_environment_variable_dropping := true#allow_environment_variable_dropping := false#g" \
+ -e 's#{"pattern":"DEPLOYMENT_ENVIRONMENT=DEPLOYMENT_ENVIRONMENT_PLACEHOLDER","required":false,"strategy":"string"}#{"pattern":"DEPLOYMENT_ENVIRONMENT=.+","required":false,"strategy":"re2"}#g' \
+ -e 's#{"pattern":"VAULT_NAME=VAULT_NAME_PLACEHOLDER","required":false,"strategy":"string"}#{"pattern":"VAULT_NAME=.+","required":false,"strategy":"re2"}#g' \
+ -e 's#{"pattern":"OPERATOR_KEY_SECRET_NAME=OPERATOR_KEY_SECRET_NAME_PLACEHOLDER","required":false,"strategy":"string"}#{"pattern":"OPERATOR_KEY_SECRET_NAME=.+","required":false,"strategy":"re2"}#g' \
+ ${INPUT_DIR}/generated.rego
+if [[ $? -ne 0 ]]; then
+ echo "Failed to replace placeholders in policy file"
+ exit 1
+fi
+
+base64 -w0 < ${INPUT_DIR}/generated.rego > ${INPUT_DIR}/generated.rego.base64
+if [[ $? -ne 0 ]]; then
+ echo "Failed to base64-encode policy file"
+ exit 1
+fi
+
+python3 ${SCRIPT_DIR}/../../azure-cc/deployment/generate.py ${INPUT_DIR}/generated.rego > ${MANIFEST_DIR}/${POLICY_DIGEST_FILE}
+if [[ $? -ne 0 ]]; then
+ echo "Failed to generate digest from policy file"
+ exit 1
+fi
+
+sed --in-place "s#CCE_POLICY_PLACEHOLDER#$(cat ${INPUT_DIR}/generated.rego.base64)#g" ${OUTPUT_DIR}/operator.yaml
+if [[ $? -ne 0 ]]; then
+ echo "Failed to replace placeholder in operator.yaml"
+ exit 1
+fi
+
diff --git a/scripts/azure-aks/deployment/operator.yaml b/scripts/azure-aks/deployment/operator.yaml
new file mode 100644
index 000000000..f7fc34c05
--- /dev/null
+++ b/scripts/azure-aks/deployment/operator.yaml
@@ -0,0 +1,91 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: operator-deployment
+spec:
+ replicas: 3
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: operator
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: operator
+ annotations:
+ microsoft.containerinstance.virtualnode.ccepolicy: CCE_POLICY_PLACEHOLDER
+ microsoft.containerinstance.virtualnode.identity: IDENTITY_PLACEHOLDER
+ microsoft.containerinstance.virtualnode.injectdns: "false"
+ spec:
+ containers:
+ - image: "mcr.microsoft.com/aci/skr:2.7"
+ imagePullPolicy: Always
+ name: skr
+ resources:
+ limits:
+ cpu: 2250m
+ memory: 2256Mi
+ requests:
+ cpu: 100m
+ memory: 512Mi
+ env:
+ - name: Port
+ value: "9000"
+ volumeMounts:
+ - mountPath: /opt/confidential-containers/share/kata-containers/reference-info-base64
+ name: endorsement-location
+ command:
+ - /skr.sh
+ - name: uid2-operator
+ image: IMAGE_PLACEHOLDER
+ resources:
+ limits:
+ memory: "8Gi"
+ imagePullPolicy: Always
+ securityContext:
+ runAsUser: 1000
+ env:
+ - name: VAULT_NAME
+ value: VAULT_NAME_PLACEHOLDER
+ - name: OPERATOR_KEY_SECRET_NAME
+ value: OPERATOR_KEY_SECRET_NAME_PLACEHOLDER
+ - name: DEPLOYMENT_ENVIRONMENT
+ value: DEPLOYMENT_ENVIRONMENT_PLACEHOLDER
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - name: prometheus
+ containerPort: 9080
+ protocol: TCP
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ops/healthcheck
+ port: 8080
+ scheme: HTTP
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 1
+ volumes:
+ - name: endorsement-location
+ hostPath:
+ path: /opt/confidential-containers/share/kata-containers/reference-info-base64
+ nodeSelector:
+ virtualization: virtualnode2
+ tolerations:
+ - effect: NoSchedule
+ key: virtual-kubelet.io/provider
+ operator: Exists
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: operator-svc
+spec:
+ type: LoadBalancer
+ selector:
+ app.kubernetes.io/name: operator
+ ports:
+ - protocol: TCP
+ port: 80
+ targetPort: 8080
diff --git a/scripts/azure-cc/Dockerfile b/scripts/azure-cc/Dockerfile
index bb0c96b70..91b6c8096 100644
--- a/scripts/azure-cc/Dockerfile
+++ b/scripts/azure-cc/Dockerfile
@@ -1,13 +1,24 @@
-# sha from https://hub.docker.com/layers/amd64/eclipse-temurin/21.0.4_7-jre-alpine/images/sha256-8179ddc8a6c5ac9af935020628763b9a5a671e0914976715d2b61b21881cefca
-FROM eclipse-temurin@sha256:8179ddc8a6c5ac9af935020628763b9a5a671e0914976715d2b61b21881cefca
+# sha from https://hub.docker.com/layers/amd64/eclipse-temurin/21.0.6_7-jre-alpine/images/sha256-f184bb601f9e6068dd0a92738764d1ff447ab68c15ddbf8c303c5c29de9a1df8
+FROM eclipse-temurin@sha256:f184bb601f9e6068dd0a92738764d1ff447ab68c15ddbf8c303c5c29de9a1df8
-# Install Packages
-RUN apk update && apk add jq
+# Install necessary packages and set up virtual environment
+RUN apk update && apk add --no-cache jq python3 py3-pip && \
+ python3 -m venv /venv && \
+ . /venv/bin/activate && \
+ pip install --no-cache-dir requests azure-identity azure-keyvault-secrets && \
+ rm -rf /var/cache/apk/*
+# Set virtual environment path
+ENV PATH="/venv/bin:$PATH"
+
+# Working directory
WORKDIR /app
+
+# Expose necessary ports
EXPOSE 8080
EXPOSE 9080
+# ARG and ENV variables
ARG JAR_NAME=uid2-operator
ARG JAR_VERSION=1.0.0-SNAPSHOT
ARG IMAGE_VERSION=1.0.0.unknownhash
@@ -15,20 +26,29 @@ ENV JAR_NAME=${JAR_NAME}
ENV JAR_VERSION=${JAR_VERSION}
ENV IMAGE_VERSION=${IMAGE_VERSION}
ENV REGION=default
-ENV LOKI_HOSTNAME=loki
+# Copy application files
COPY ./target/${JAR_NAME}-${JAR_VERSION}-jar-with-dependencies.jar /app/${JAR_NAME}-${JAR_VERSION}.jar
COPY ./target/${JAR_NAME}-${JAR_VERSION}-sources.jar /app
COPY ./target/${JAR_NAME}-${JAR_VERSION}-static.tar.gz /app/static.tar.gz
COPY ./conf/*.json /app/conf/
COPY ./conf/*.xml /app/conf/
-RUN tar xzvf /app/static.tar.gz --no-same-owner --no-same-permissions && rm -f /app/static.tar.gz
+# Extract and clean up tar.gz
+RUN tar xzvf /app/static.tar.gz --no-same-owner --no-same-permissions && \
+ rm -f /app/static.tar.gz
+
+COPY ./azureEntryPoint.py /app
+COPY ./confidential_compute.py /app
+RUN chmod a+x /app/*.py
-COPY ./entrypoint.sh /app/
-RUN chmod a+x /app/entrypoint.sh
+# Create and configure non-root user
+RUN adduser -D uid2-operator && \
+ mkdir -p /opt/uid2 && chmod 777 -R /opt/uid2 && \
+ chmod 705 -R /app && mkdir -p /app/file-uploads && chmod 777 -R /app/file-uploads
-RUN adduser -D uid2-operator && mkdir -p /opt/uid2 && chmod 777 -R /opt/uid2 && mkdir -p /app && chmod 705 -R /app && mkdir -p /app/file-uploads && chmod 777 -R /app/file-uploads
+# Switch to non-root user
USER uid2-operator
-CMD ["/app/entrypoint.sh"]
+# Run the Python entry point
+CMD python3 /app/azureEntryPoint.py
\ No newline at end of file
diff --git a/scripts/azure-cc/azureEntryPoint.py b/scripts/azure-cc/azureEntryPoint.py
new file mode 100644
index 000000000..ffa49b3c7
--- /dev/null
+++ b/scripts/azure-cc/azureEntryPoint.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+
+import json
+import os
+import time
+from typing import Dict
+import sys
+import shutil
+import requests
+import logging
+from confidential_compute import ConfidentialCompute, ConfigurationMissingError, OperatorKeyPermissionError, OperatorKeyNotFoundError, ConfidentialComputeStartupError
+from azure.keyvault.secrets import SecretClient
+from azure.identity import DefaultAzureCredential, CredentialUnavailableError
+from azure.core.exceptions import ResourceNotFoundError, ClientAuthenticationError
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+class AzureEntryPoint(ConfidentialCompute):
+
+ kv_name = os.getenv("VAULT_NAME")
+ secret_name = os.getenv("OPERATOR_KEY_SECRET_NAME")
+ env_name = os.getenv("DEPLOYMENT_ENVIRONMENT")
+ jar_name = os.getenv("JAR_NAME", "default-jar-name")
+ jar_version = os.getenv("JAR_VERSION", "default-jar-version")
+ default_core_endpoint = f"https://core-{env_name}.uidapi.com".lower()
+ default_optout_endpoint = f"https://optout-{env_name}.uidapi.com".lower()
+
+ FINAL_CONFIG = "/tmp/final-config.json"
+
+ def __init__(self):
+ super().__init__()
+
+ def __check_env_variables(self):
+ # Check essential env variables
+ if AzureEntryPoint.kv_name is None:
+ raise ConfigurationMissingError(self.__class__.__name__, ["VAULT_NAME"])
+ if AzureEntryPoint.secret_name is None:
+ raise ConfigurationMissingError(self.__class__.__name__, ["OPERATOR_KEY_SECRET_NAME"])
+ if AzureEntryPoint.env_name is None:
+ raise ConfigurationMissingError(self.__class__.__name__, ["DEPLOYMENT_ENVIRONMENT"])
+ logging.info("Environment variables validation success")
+
+ def __create_final_config(self):
+ TARGET_CONFIG = f"/app/conf/{AzureEntryPoint.env_name}-uid2-config.json"
+ if not os.path.isfile(TARGET_CONFIG):
+ logging.error(f"Unrecognized config {TARGET_CONFIG}")
+ sys.exit(1)
+
+ logging.info(f"-- copying {TARGET_CONFIG} to {AzureEntryPoint.FINAL_CONFIG}")
+ try:
+ shutil.copy(TARGET_CONFIG, AzureEntryPoint.FINAL_CONFIG)
+ except IOError as e:
+ logging.error(f"Failed to create {AzureEntryPoint.FINAL_CONFIG} with error: {e}")
+ sys.exit(1)
+
+        logging.info(f"-- replacing URLs by {self.configs['core_base_url']} and {self.configs['optout_base_url']}")
+ with open(AzureEntryPoint.FINAL_CONFIG, "r") as file:
+ config = file.read()
+
+ config = config.replace("https://core.uidapi.com", self.configs["core_base_url"])
+ config = config.replace("https://optout.uidapi.com", self.configs["optout_base_url"])
+ with open(AzureEntryPoint.FINAL_CONFIG, "w") as file:
+ file.write(config)
+
+ with open(AzureEntryPoint.FINAL_CONFIG, "r") as file:
+ logging.info(file.read())
+
+ def __set_operator_key(self):
+ try:
+ credential = DefaultAzureCredential()
+ kv_URL = f"https://{AzureEntryPoint.kv_name}.vault.azure.net"
+ secret_client = SecretClient(vault_url=kv_URL, credential=credential)
+ secret = secret_client.get_secret(AzureEntryPoint.secret_name)
+ self.configs["operator_key"] = secret.value
+
+ except (CredentialUnavailableError, ClientAuthenticationError) as auth_error:
+ logging.error(f"Read operator key, authentication error: {auth_error}")
+ raise OperatorKeyPermissionError(self.__class__.__name__, str(auth_error))
+ except ResourceNotFoundError as not_found_error:
+ logging.error(f"Read operator key, secret not found: {AzureEntryPoint.secret_name}. Error: {not_found_error}")
+ raise OperatorKeyNotFoundError(self.__class__.__name__, str(not_found_error))
+
+
+ def _set_confidential_config(self, secret_identifier: str = None):
+ """Builds and sets ConfidentialComputeConfig"""
+ self.configs["skip_validations"] = os.getenv("SKIP_VALIDATIONS", "false").lower() == "true"
+ self.configs["debug_mode"] = os.getenv("DEBUG_MODE", "false").lower() == "true"
+ self.configs["environment"] = AzureEntryPoint.env_name
+ self.configs["core_base_url"] = os.getenv("CORE_BASE_URL") if os.getenv("CORE_BASE_URL") and AzureEntryPoint.env_name == "integ" else AzureEntryPoint.default_core_endpoint
+ self.configs["optout_base_url"] = os.getenv("OPTOUT_BASE_URL") if os.getenv("OPTOUT_BASE_URL") and AzureEntryPoint.env_name == "integ" else AzureEntryPoint.default_optout_endpoint
+ self.__set_operator_key()
+
+ def __run_operator(self):
+
+ # Start the operator
+ os.environ["azure_vault_name"] = AzureEntryPoint.kv_name
+ os.environ["azure_secret_name"] = AzureEntryPoint.secret_name
+
+ java_command = [
+ "java",
+ "-XX:MaxRAMPercentage=95", "-XX:-UseCompressedOops", "-XX:+PrintFlagsFinal",
+ "-Djava.security.egd=file:/dev/./urandom",
+ "-Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory",
+ "-Dlogback.configurationFile=/app/conf/logback.xml",
+ f"-Dvertx-config-path={AzureEntryPoint.FINAL_CONFIG}",
+ "-jar",
+ f"{AzureEntryPoint.jar_name}-{AzureEntryPoint.jar_version}.jar"
+ ]
+ logging.info("-- starting java operator application")
+ self.run_command(java_command, separate_process=False)
+
+ def _validate_auxiliaries(self):
+ logging.info("Waiting for sidecar ...")
+
+ MAX_RETRIES = 15
+ PING_URL = "http://169.254.169.254/ping"
+ delay = 1
+
+ for attempt in range(1, MAX_RETRIES + 1):
+ try:
+ response = requests.get(PING_URL, timeout=5)
+ if response.status_code in [200, 204]:
+ logging.info("Sidecar started successfully.")
+ return
+ else:
+ logging.warning(
+ f"Attempt {attempt}: Unexpected status code {response.status_code}. Response: {response.text}"
+ )
+ except Exception as e:
+ logging.info(f"Attempt {attempt}: Error during request - {e}")
+
+ if attempt == MAX_RETRIES:
+ raise RuntimeError(f"Unable to detect sidecar running after {MAX_RETRIES} attempts. Exiting.")
+
+ logging.info(f"Retrying in {delay} seconds... (Attempt {attempt}/{MAX_RETRIES})")
+ time.sleep(delay)
+ delay += 1
+
+ def run_compute(self) -> None:
+ """Main execution flow for confidential compute."""
+ self.__check_env_variables()
+ self._set_confidential_config()
+ if not self.configs.get("skip_validations"):
+ self.validate_configuration()
+ self.__create_final_config()
+ self._setup_auxiliaries()
+ self.__run_operator()
+
+ def _setup_auxiliaries(self) -> None:
+ """ setup auxiliary services are running."""
+ pass
+
+if __name__ == "__main__":
+
+ logging.basicConfig(level=logging.INFO)
+ logging.info("Start AzureEntryPoint")
+ try:
+ operator = AzureEntryPoint()
+ operator.run_compute()
+ except ConfidentialComputeStartupError as e:
+        logging.error(f"Failed starting up Azure Confidential Compute. Please check the logs for errors and retry {e}", exc_info=True)
+ except Exception as e:
+ logging.error(f"Unexpected failure while starting up Azure Confidential Compute. Please contact UID support team with this log {e}", exc_info=True)
\ No newline at end of file
diff --git a/scripts/azure-cc/conf/default-config.json b/scripts/azure-cc/conf/default-config.json
index fbe3e7184..b63f7420c 100644
--- a/scripts/azure-cc/conf/default-config.json
+++ b/scripts/azure-cc/conf/default-config.json
@@ -38,7 +38,6 @@
"failure_shutdown_wait_hours": 120,
"sharing_token_expiry_seconds": 2592000,
"validate_service_links": false,
- "advertising_token_v4_percentage": 100,
- "site_ids_using_v4_tokens": "",
- "operator_type": "private"
+ "operator_type": "private",
+ "enable_remote_config": false
}
diff --git a/scripts/azure-cc/conf/integ-uid2-config.json b/scripts/azure-cc/conf/integ-uid2-config.json
index 2cd4be5c3..4a47711ee 100644
--- a/scripts/azure-cc/conf/integ-uid2-config.json
+++ b/scripts/azure-cc/conf/integ-uid2-config.json
@@ -1,14 +1,22 @@
{
- "sites_metadata_path": "https://core-integ.uidapi.com/sites/refresh",
- "clients_metadata_path": "https://core-integ.uidapi.com/clients/refresh",
- "keysets_metadata_path": "https://core-integ.uidapi.com/key/keyset/refresh",
- "keyset_keys_metadata_path": "https://core-integ.uidapi.com/key/keyset-keys/refresh",
- "client_side_keypairs_metadata_path": "https://core-integ.uidapi.com/client_side_keypairs/refresh",
- "salts_metadata_path": "https://core-integ.uidapi.com/salt/refresh",
- "services_metadata_path": "https://core-integ.uidapi.com/services/refresh",
- "service_links_metadata_path": "https://core-integ.uidapi.com/service_links/refresh",
- "optout_metadata_path": "https://optout-integ.uidapi.com/optout/refresh",
- "core_attest_url": "https://core-integ.uidapi.com/attest",
- "optout_api_uri": "https://optout-integ.uidapi.com/optout/replicate",
- "optout_s3_folder": "uid-optout-integ/"
+ "sites_metadata_path": "https://core.uidapi.com/sites/refresh",
+ "clients_metadata_path": "https://core.uidapi.com/clients/refresh",
+ "keysets_metadata_path": "https://core.uidapi.com/key/keyset/refresh",
+ "keyset_keys_metadata_path": "https://core.uidapi.com/key/keyset-keys/refresh",
+ "client_side_keypairs_metadata_path": "https://core.uidapi.com/client_side_keypairs/refresh",
+ "salts_metadata_path": "https://core.uidapi.com/salt/refresh",
+ "services_metadata_path": "https://core.uidapi.com/services/refresh",
+ "service_links_metadata_path": "https://core.uidapi.com/service_links/refresh",
+ "optout_metadata_path": "https://optout.uidapi.com/optout/refresh",
+ "core_attest_url": "https://core.uidapi.com/attest",
+ "optout_api_uri": "https://optout.uidapi.com/optout/replicate",
+ "cloud_encryption_keys_metadata_path": "https://core.uidapi.com/cloud_encryption_keys/retrieve",
+ "optout_s3_folder": "uid-optout-integ/",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "https://core.uidapi.com/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ }
}
diff --git a/scripts/azure-cc/conf/prod-uid2-config.json b/scripts/azure-cc/conf/prod-uid2-config.json
index 02e2cde20..232344504 100644
--- a/scripts/azure-cc/conf/prod-uid2-config.json
+++ b/scripts/azure-cc/conf/prod-uid2-config.json
@@ -1,15 +1,23 @@
{
- "sites_metadata_path": "https://core-prod.uidapi.com/sites/refresh",
- "clients_metadata_path": "https://core-prod.uidapi.com/clients/refresh",
- "keysets_metadata_path": "https://core-prod.uidapi.com/key/keyset/refresh",
- "keyset_keys_metadata_path": "https://core-prod.uidapi.com/key/keyset-keys/refresh",
- "client_side_keypairs_metadata_path": "https://core-prod.uidapi.com/client_side_keypairs/refresh",
- "salts_metadata_path": "https://core-prod.uidapi.com/salt/refresh",
- "services_metadata_path": "https://core-prod.uidapi.com/services/refresh",
- "service_links_metadata_path": "https://core-prod.uidapi.com/service_links/refresh",
- "optout_metadata_path": "https://optout-prod.uidapi.com/optout/refresh",
- "core_attest_url": "https://core-prod.uidapi.com/attest",
- "optout_api_uri": "https://optout-prod.uidapi.com/optout/replicate",
+ "sites_metadata_path": "https://core.uidapi.com/sites/refresh",
+ "clients_metadata_path": "https://core.uidapi.com/clients/refresh",
+ "keysets_metadata_path": "https://core.uidapi.com/key/keyset/refresh",
+ "keyset_keys_metadata_path": "https://core.uidapi.com/key/keyset-keys/refresh",
+ "client_side_keypairs_metadata_path": "https://core.uidapi.com/client_side_keypairs/refresh",
+ "salts_metadata_path": "https://core.uidapi.com/salt/refresh",
+ "services_metadata_path": "https://core.uidapi.com/services/refresh",
+ "service_links_metadata_path": "https://core.uidapi.com/service_links/refresh",
+ "optout_metadata_path": "https://optout.uidapi.com/optout/refresh",
+ "core_attest_url": "https://core.uidapi.com/attest",
+ "cloud_encryption_keys_metadata_path": "https://core.uidapi.com/cloud_encryption_keys/retrieve",
+ "optout_api_uri": "https://optout.uidapi.com/optout/replicate",
"optout_s3_folder": "optout-v2/",
- "identity_token_expires_after_seconds": 259200
+ "identity_token_expires_after_seconds": 259200,
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "https://core.uidapi.com/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ }
}
diff --git a/scripts/azure-cc/deployment/operator.json b/scripts/azure-cc/deployment/operator.json
index b50ecced9..0348802de 100644
--- a/scripts/azure-cc/deployment/operator.json
+++ b/scripts/azure-cc/deployment/operator.json
@@ -54,6 +54,16 @@
"metadata": {
"description": "Operator Key"
}
+ },
+ "skipValidations": {
+ "type": "string",
+ "metadata": {
+ "description": "Whether to skip pre-init validations"
+ },
+ "allowedValues": [
+ "true",
+ "false"
+ ]
}
},
"variables": {
@@ -122,6 +132,10 @@
{
"name": "DEPLOYMENT_ENVIRONMENT",
"value": "[parameters('deploymentEnvironment')]"
+ },
+ {
+ "name": "SKIP_VALIDATIONS",
+ "value": "[parameters('skipValidations')]"
}
]
}
diff --git a/scripts/azure-cc/deployment/operator.parameters.json b/scripts/azure-cc/deployment/operator.parameters.json
index 776690776..5095746ea 100644
--- a/scripts/azure-cc/deployment/operator.parameters.json
+++ b/scripts/azure-cc/deployment/operator.parameters.json
@@ -22,6 +22,9 @@
},
"deploymentEnvironment": {
"value": "integ"
+ },
+ "skipValidations": {
+ "value": "false"
}
}
}
diff --git a/scripts/azure-cc/entrypoint.sh b/scripts/azure-cc/entrypoint.sh
deleted file mode 100644
index 14875c9bf..000000000
--- a/scripts/azure-cc/entrypoint.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/sh
-#
-# This script must be compatible with Ash (provided in eclipse-temurin Docker image) and Bash
-
-function wait_for_sidecar() {
- url="http://169.254.169.254/ping"
- delay=1
- max_retries=15
-
- while true; do
- if wget -q --spider --tries=1 --timeout 5 "$url" > /dev/null; then
- echo "side car started"
- break
- else
- echo "side car not started. Retrying in $delay seconds..."
- sleep $delay
- if [ $delay -gt $max_retries ]; then
- echo "side car failed to start"
- break
- fi
- delay=$((delay + 1))
- fi
- done
-}
-
-TMP_FINAL_CONFIG="/tmp/final-config.tmp"
-
-if [ -z "${VAULT_NAME}" ]; then
- echo "VAULT_NAME cannot be empty"
- exit 1
-fi
-
-if [ -z "${OPERATOR_KEY_SECRET_NAME}" ]; then
- echo "OPERATOR_KEY_SECRET_NAME cannot be empty"
- exit 1
-fi
-
-export azure_vault_name="${VAULT_NAME}"
-export azure_secret_name="${OPERATOR_KEY_SECRET_NAME}"
-
-# -- locate config file
-if [ -z "${DEPLOYMENT_ENVIRONMENT}" ]; then
- echo "DEPLOYMENT_ENVIRONMENT cannot be empty"
- exit 1
-fi
-if [ "${DEPLOYMENT_ENVIRONMENT}" != 'prod' -a "${DEPLOYMENT_ENVIRONMENT}" != 'integ' ]; then
- echo "Unrecognized DEPLOYMENT_ENVIRONMENT ${DEPLOYMENT_ENVIRONMENT}"
- exit 1
-fi
-
-TARGET_CONFIG="/app/conf/${DEPLOYMENT_ENVIRONMENT}-uid2-config.json"
-if [ ! -f "${TARGET_CONFIG}" ]; then
- echo "Unrecognized config ${TARGET_CONFIG}"
- exit 1
-fi
-
-FINAL_CONFIG="/tmp/final-config.json"
-echo "-- copying ${TARGET_CONFIG} to ${FINAL_CONFIG}"
-cp ${TARGET_CONFIG} ${FINAL_CONFIG}
-if [ $? -ne 0 ]; then
- echo "Failed to create ${FINAL_CONFIG} with error code $?"
- exit 1
-fi
-
-# -- replace base URLs if both CORE_BASE_URL and OPTOUT_BASE_URL are provided
-# -- using hardcoded domains is fine because they should not be changed frequently
-if [ -n "${CORE_BASE_URL}" -a -n "${OPTOUT_BASE_URL}" -a "${DEPLOYMENT_ENVIRONMENT}" != 'prod' ]; then
- echo "-- replacing URLs by ${CORE_BASE_URL} and ${OPTOUT_BASE_URL}"
- sed -i "s#https://core-integ.uidapi.com#${CORE_BASE_URL}#g" ${FINAL_CONFIG}
-
- sed -i "s#https://optout-integ.uidapi.com#${OPTOUT_BASE_URL}#g" ${FINAL_CONFIG}
-fi
-
-cat $FINAL_CONFIG
-
-# delay the start of the operator until the side car has started correctly
-wait_for_sidecar
-
-# -- start operator
-echo "-- starting java application"
-java \
- -XX:MaxRAMPercentage=95 -XX:-UseCompressedOops -XX:+PrintFlagsFinal \
- -Djava.security.egd=file:/dev/./urandom \
- -Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory \
- -Dlogback.configurationFile=/app/conf/logback.xml \
- -Dvertx-config-path=${FINAL_CONFIG} \
- -jar ${JAR_NAME}-${JAR_VERSION}.jar
diff --git a/scripts/confidential_compute.py b/scripts/confidential_compute.py
new file mode 100644
index 000000000..73b572a0b
--- /dev/null
+++ b/scripts/confidential_compute.py
@@ -0,0 +1,151 @@
+import requests
+import re
+import socket
+from urllib.parse import urlparse
+from abc import ABC, abstractmethod
+from typing import TypedDict, NotRequired, get_type_hints
+import subprocess
+import logging
+
+class ConfidentialComputeConfig(TypedDict):
+ operator_key: str
+ core_base_url: str
+ optout_base_url: str
+ environment: str
+ skip_validations: NotRequired[bool]
+ debug_mode: NotRequired[bool]
+
+class ConfidentialComputeStartupError(Exception):
+ def __init__(self, error_name, provider, extra_message=None):
+ urls = {
+ "EC2EntryPoint": "https://unifiedid.com/docs/guides/operator-guide-aws-marketplace#uid2-operator-error-codes",
+ "AzureEntryPoint": "https://unifiedid.com/docs/guides/operator-guide-azure-enclave#uid2-operator-error-codes",
+ "GCPEntryPoint": "https://unifiedid.com/docs/guides/operator-private-gcp-confidential-space#uid2-operator-error-codes",
+ }
+ url = urls.get(provider)
+ super().__init__(f"{error_name}\n" + (extra_message if extra_message else "") + f"\nVisit {url} for more details")
+
+class InstanceProfileMissingError(ConfidentialComputeStartupError):
+ def __init__(self, cls, message = None):
+ super().__init__(error_name=f"E01: {self.__class__.__name__}", provider=cls, extra_message=message)
+
+class OperatorKeyNotFoundError(ConfidentialComputeStartupError):
+ def __init__(self, cls, message = None):
+ super().__init__(error_name=f"E02: {self.__class__.__name__}", provider=cls, extra_message=message)
+
+class ConfigurationMissingError(ConfidentialComputeStartupError):
+ def __init__(self, cls, missing_keys):
+ super().__init__(error_name=f"E03: {self.__class__.__name__}", provider=cls, extra_message=', '.join(missing_keys))
+
+class ConfigurationValueError(ConfidentialComputeStartupError):
+ def __init__(self, cls, config_key = None):
+ super().__init__(error_name=f"E04: {self.__class__.__name__} " , provider=cls, extra_message=config_key)
+
+class OperatorKeyValidationError(ConfidentialComputeStartupError):
+ def __init__(self, cls):
+ super().__init__(error_name=f"E05: {self.__class__.__name__}", provider=cls)
+
+class UID2ServicesUnreachableError(ConfidentialComputeStartupError):
+ def __init__(self, cls, ip=None):
+ super().__init__(error_name=f"E06: {self.__class__.__name__}", provider=cls, extra_message=ip)
+
+class OperatorKeyPermissionError(ConfidentialComputeStartupError):
+ def __init__(self, cls, message = None):
+ super().__init__(error_name=f"E08: {self.__class__.__name__}", provider=cls, extra_message=message)
+
+class ConfidentialCompute(ABC):
+
+ def __init__(self):
+ self.configs: ConfidentialComputeConfig = {}
+
+ def validate_configuration(self):
+        """ Validates the parameters specified through configs/secret manager. """
+ logging.info("Validating configurations provided")
+ def validate_operator_key():
+ """ Validates the operator key format and its environment alignment."""
+ operator_key = self.configs.get("operator_key")
+ pattern = r"^(UID2|EUID)-.\-(I|P|L)-\d+-.*$"
+ if re.match(pattern, operator_key):
+ env = self.configs.get("environment", "").lower()
+ debug_mode = self.configs.get("debug_mode", False)
+ expected_env = "I" if debug_mode or env == "integ" else "P"
+ if operator_key.split("-")[2] != expected_env:
+ raise OperatorKeyValidationError(self.__class__.__name__)
+ logging.info("Validated operator key matches environment")
+ else:
+ logging.info("Skipping operator key validation")
+
+ def validate_url(url_key, environment):
+ """URL should include environment except in prod"""
+ if environment != "prod" and environment not in self.configs[url_key]:
+ raise ConfigurationValueError(self.__class__.__name__, url_key)
+ parsed_url = urlparse(self.configs[url_key])
+ if parsed_url.scheme != 'https' and parsed_url.path:
+ raise ConfigurationValueError(self.__class__.__name__, url_key)
+ logging.info(f"Validated {self.configs[url_key]} matches other config parameters")
+
+ def validate_connectivity() -> None:
+ """ Validates that the core URL is accessible."""
+ try:
+ core_url = self.configs["core_base_url"]
+ core_ip = socket.gethostbyname(urlparse(core_url).netloc)
+ requests.get(core_url, timeout=5)
+ logging.info(f"Validated connectivity to {core_url}")
+ except (requests.ConnectionError, requests.Timeout) as e:
+ raise UID2ServicesUnreachableError(self.__class__.__name__, core_ip)
+ except Exception as e:
+ raise UID2ServicesUnreachableError(self.__class__.__name__)
+
+ type_hints = get_type_hints(ConfidentialComputeConfig, include_extras=True)
+ required_keys = [field for field, hint in type_hints.items() if "NotRequired" not in str(hint)]
+ missing_keys = [key for key in required_keys if key not in self.configs or self.configs[key] == None]
+ if missing_keys:
+ raise ConfigurationMissingError(self.__class__.__name__, missing_keys)
+
+ environment = self.configs["environment"]
+ if environment not in ["integ", "prod"]:
+ raise ConfigurationValueError(self.__class__.__name__, "environment")
+
+ if self.configs.get("debug_mode") and environment == "prod":
+ raise ConfigurationValueError(self.__class__.__name__, "debug_mode")
+
+ validate_url("core_base_url", environment)
+ validate_url("optout_base_url", environment)
+ validate_operator_key()
+ validate_connectivity()
+ logging.info("Completed static validation of confidential compute config values")
+
+ @abstractmethod
+ def _set_confidential_config(self, secret_identifier: str) -> None:
+ """
+ Set ConfidentialComputeConfig
+ """
+ pass
+
+ @abstractmethod
+ def _setup_auxiliaries(self) -> None:
+ """ Sets up auxiliary processes required for confidential computing. """
+ pass
+
+ @abstractmethod
+ def _validate_auxiliaries(self) -> None:
+ """ Validates auxiliary services are running."""
+ pass
+
+ @abstractmethod
+ def run_compute(self) -> None:
+ """ Runs confidential computing."""
+ pass
+
+ @staticmethod
+ def run_command(command, separate_process=False, stdout=None, stderr=None):
+ logging.info(f"Running command: {' '.join(command)}")
+ try:
+ if separate_process:
+ subprocess.Popen(command, stdout=stdout, stderr=stderr)
+ else:
+ subprocess.run(command, check=True, stdout=stdout, stderr=stderr)
+
+ except Exception as e:
+ logging.error(f"Failed to run command: {e}", exc_info=True)
+ raise RuntimeError (f"Failed to start {' '.join(command)} ")
\ No newline at end of file
diff --git a/scripts/gcp-oidc/Dockerfile b/scripts/gcp-oidc/Dockerfile
index 76b302e30..5320b7223 100644
--- a/scripts/gcp-oidc/Dockerfile
+++ b/scripts/gcp-oidc/Dockerfile
@@ -1,11 +1,15 @@
-# sha from https://hub.docker.com/layers/amd64/eclipse-temurin/21.0.4_7-jre-alpine/images/sha256-8179ddc8a6c5ac9af935020628763b9a5a671e0914976715d2b61b21881cefca
-FROM eclipse-temurin@sha256:8179ddc8a6c5ac9af935020628763b9a5a671e0914976715d2b61b21881cefca
+# sha from https://hub.docker.com/layers/amd64/eclipse-temurin/21.0.6_7-jre-alpine/images/sha256-f184bb601f9e6068dd0a92738764d1ff447ab68c15ddbf8c303c5c29de9a1df8
+FROM eclipse-temurin@sha256:f184bb601f9e6068dd0a92738764d1ff447ab68c15ddbf8c303c5c29de9a1df8
-LABEL "tee.launch_policy.allow_env_override"="API_TOKEN_SECRET_NAME,DEPLOYMENT_ENVIRONMENT,CORE_BASE_URL,OPTOUT_BASE_URL"
+LABEL "tee.launch_policy.allow_env_override"="API_TOKEN_SECRET_NAME,DEPLOYMENT_ENVIRONMENT,CORE_BASE_URL,OPTOUT_BASE_URL,DEBUG_MODE,SKIP_VALIDATIONS"
LABEL "tee.launch_policy.log_redirect"="always"
# Install Packages
-RUN apk update && apk add jq
+RUN apk update && apk add --no-cache jq python3 py3-pip && \
+ python3 -m venv /venv && \
+ . /venv/bin/activate && \
+ pip install --no-cache-dir google-cloud-secret-manager google-auth google-api-core && \
+ rm -rf /var/cache/apk/*
WORKDIR /app
EXPOSE 8080
@@ -18,7 +22,6 @@ ENV JAR_NAME=${JAR_NAME}
ENV JAR_VERSION=${JAR_VERSION}
ENV IMAGE_VERSION=${IMAGE_VERSION}
ENV REGION=default
-ENV LOKI_HOSTNAME=loki
COPY ./target/${JAR_NAME}-${JAR_VERSION}-jar-with-dependencies.jar /app/${JAR_NAME}-${JAR_VERSION}.jar
COPY ./target/${JAR_NAME}-${JAR_VERSION}-sources.jar /app
@@ -28,9 +31,10 @@ COPY ./conf/*.xml /app/conf/
RUN tar xzvf /app/static.tar.gz --no-same-owner --no-same-permissions && rm -f /app/static.tar.gz
-COPY ./entrypoint.sh /app/
-RUN chmod a+x /app/entrypoint.sh
+COPY ./gcp.py /app/
+COPY ./confidential_compute.py /app
+RUN chmod a+x /app/gcp.py
RUN mkdir -p /opt/uid2 && chmod 777 -R /opt/uid2 && mkdir -p /app && chmod 705 -R /app && mkdir -p /app/file-uploads && chmod 777 -R /app/file-uploads
-CMD ["/app/entrypoint.sh"]
+CMD ["/venv/bin/python", "/app/gcp.py"]
diff --git a/scripts/gcp-oidc/conf/default-config.json b/scripts/gcp-oidc/conf/default-config.json
index 302a8c3c3..5d54e792d 100644
--- a/scripts/gcp-oidc/conf/default-config.json
+++ b/scripts/gcp-oidc/conf/default-config.json
@@ -1,44 +1,42 @@
-{
- "service_verbose": true,
- "service_instances": 12,
- "core_s3_bucket": null,
- "core_attest_url": null,
- "core_api_token": null,
- "storage_mock": false,
- "optout_s3_bucket": null,
- "optout_s3_folder": "optout/",
- "optout_s3_path_compat": false,
- "optout_data_dir": "/opt/uid2/operator-optout/",
- "optout_api_token": null,
- "optout_api_uri": null,
- "optout_bloom_filter_size": 8192,
- "optout_delta_rotate_interval": 300,
- "optout_delta_backtrack_in_days": 1,
- "optout_partition_interval": 86400,
- "optout_max_partitions": 30,
- "optout_heap_default_capacity": 8192,
- "cloud_download_threads": 8,
- "cloud_upload_threads": 2,
- "cloud_refresh_interval": 60,
- "sites_metadata_path": "sites/metadata.json",
- "clients_metadata_path": "clients/metadata.json",
- "client_side_keypairs_metadata_path": "client_side_keypairs/metadata.json",
- "keysets_metadata_path": "keysets/metadata.json",
- "keyset_keys_metadata_path": "keyset_keys/metadata.json",
- "salts_metadata_path": "salts/metadata.json",
- "services_metadata_path": "services/metadata.json",
- "service_links_metadata_path": "service_links/metadata.json",
- "optout_metadata_path": null,
- "enclave_platform": "gcp-oidc",
- "optout_inmem_cache": true,
- "identity_token_expires_after_seconds": 86400,
- "refresh_token_expires_after_seconds": 2592000,
- "refresh_identity_token_after_seconds": 3600,
- "allow_legacy_api": false,
- "failure_shutdown_wait_hours": 120,
- "sharing_token_expiry_seconds": 2592000,
- "validate_service_links": false,
- "advertising_token_v4_percentage": 100,
- "site_ids_using_v4_tokens": "",
- "operator_type": "private"
-}
+{
+ "service_verbose": true,
+ "service_instances": 12,
+ "core_s3_bucket": null,
+ "core_attest_url": null,
+ "core_api_token": null,
+ "storage_mock": false,
+ "optout_s3_bucket": null,
+ "optout_s3_folder": "optout/",
+ "optout_s3_path_compat": false,
+ "optout_data_dir": "/opt/uid2/operator-optout/",
+ "optout_api_token": null,
+ "optout_api_uri": null,
+ "optout_bloom_filter_size": 8192,
+ "optout_delta_rotate_interval": 300,
+ "optout_delta_backtrack_in_days": 1,
+ "optout_partition_interval": 86400,
+ "optout_max_partitions": 30,
+ "optout_heap_default_capacity": 8192,
+ "cloud_download_threads": 8,
+ "cloud_upload_threads": 2,
+ "cloud_refresh_interval": 60,
+ "sites_metadata_path": "sites/metadata.json",
+ "clients_metadata_path": "clients/metadata.json",
+ "client_side_keypairs_metadata_path": "client_side_keypairs/metadata.json",
+ "keysets_metadata_path": "keysets/metadata.json",
+ "keyset_keys_metadata_path": "keyset_keys/metadata.json",
+ "salts_metadata_path": "salts/metadata.json",
+ "services_metadata_path": "services/metadata.json",
+ "service_links_metadata_path": "service_links/metadata.json",
+ "optout_metadata_path": null,
+ "enclave_platform": "gcp-oidc",
+ "optout_inmem_cache": true,
+ "identity_token_expires_after_seconds": 86400,
+ "refresh_token_expires_after_seconds": 2592000,
+ "refresh_identity_token_after_seconds": 3600,
+ "allow_legacy_api": false,
+ "failure_shutdown_wait_hours": 120,
+ "sharing_token_expiry_seconds": 2592000,
+ "validate_service_links": false,
+ "operator_type": "private"
+}
\ No newline at end of file
diff --git a/scripts/gcp-oidc/conf/integ-config.json b/scripts/gcp-oidc/conf/integ-config.json
new file mode 100644
index 000000000..5d3882f25
--- /dev/null
+++ b/scripts/gcp-oidc/conf/integ-config.json
@@ -0,0 +1,22 @@
+{
+ "sites_metadata_path": "https://core.uidapi.com/sites/refresh",
+ "clients_metadata_path": "https://core.uidapi.com/clients/refresh",
+ "keysets_metadata_path": "https://core.uidapi.com/key/keyset/refresh",
+ "keyset_keys_metadata_path": "https://core.uidapi.com/key/keyset-keys/refresh",
+ "client_side_keypairs_metadata_path": "https://core.uidapi.com/client_side_keypairs/refresh",
+ "salts_metadata_path": "https://core.uidapi.com/salt/refresh",
+ "services_metadata_path": "https://core.uidapi.com/services/refresh",
+ "service_links_metadata_path": "https://core.uidapi.com/service_links/refresh",
+ "optout_metadata_path": "https://optout.uidapi.com/optout/refresh",
+ "core_attest_url": "https://core.uidapi.com/attest",
+ "cloud_encryption_keys_metadata_path": "https://core.uidapi.com/cloud_encryption_keys/retrieve",
+ "optout_api_uri": "https://optout.uidapi.com/optout/replicate",
+ "optout_s3_folder": "uid-optout-integ/",
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "https://core.uidapi.com/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ }
+}
\ No newline at end of file
diff --git a/scripts/gcp-oidc/conf/integ-uid2-config.json b/scripts/gcp-oidc/conf/integ-uid2-config.json
deleted file mode 100644
index 935514b5a..000000000
--- a/scripts/gcp-oidc/conf/integ-uid2-config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "sites_metadata_path": "https://core.uidapi.com/sites/refresh",
- "clients_metadata_path": "https://core.uidapi.com/clients/refresh",
- "keysets_metadata_path": "https://core.uidapi.com/key/keyset/refresh",
- "keyset_keys_metadata_path": "https://core.uidapi.com/key/keyset-keys/refresh",
- "client_side_keypairs_metadata_path": "https://core.uidapi.com/client_side_keypairs/refresh",
- "salts_metadata_path": "https://core.uidapi.com/salt/refresh",
- "services_metadata_path": "https://core.uidapi.com/services/refresh",
- "service_links_metadata_path": "https://core.uidapi.com/service_links/refresh",
- "optout_metadata_path": "https://optout.uidapi.com/optout/refresh",
- "core_attest_url": "https://core.uidapi.com/attest",
- "optout_api_uri": "https://optout.uidapi.com/optout/replicate",
- "optout_s3_folder": "uid-optout-integ/"
-}
diff --git a/scripts/gcp-oidc/conf/prod-uid2-config.json b/scripts/gcp-oidc/conf/prod-config.json
similarity index 72%
rename from scripts/gcp-oidc/conf/prod-uid2-config.json
rename to scripts/gcp-oidc/conf/prod-config.json
index f5445a9ec..232344504 100644
--- a/scripts/gcp-oidc/conf/prod-uid2-config.json
+++ b/scripts/gcp-oidc/conf/prod-config.json
@@ -1,15 +1,23 @@
-{
- "sites_metadata_path": "https://core.uidapi.com/sites/refresh",
- "clients_metadata_path": "https://core.uidapi.com/clients/refresh",
- "keysets_metadata_path": "https://core.uidapi.com/key/keyset/refresh",
- "keyset_keys_metadata_path": "https://core.uidapi.com/key/keyset-keys/refresh",
- "client_side_keypairs_metadata_path": "https://core.uidapi.com/client_side_keypairs/refresh",
- "salts_metadata_path": "https://core.uidapi.com/salt/refresh",
- "services_metadata_path": "https://core.uidapi.com/services/refresh",
- "service_links_metadata_path": "https://core.uidapi.com/service_links/refresh",
- "optout_metadata_path": "https://optout.uidapi.com/optout/refresh",
- "core_attest_url": "https://core.uidapi.com/attest",
- "optout_api_uri": "https://optout.uidapi.com/optout/replicate",
- "optout_s3_folder": "optout-v2/",
- "identity_token_expires_after_seconds": 259200
-}
+{
+ "sites_metadata_path": "https://core.uidapi.com/sites/refresh",
+ "clients_metadata_path": "https://core.uidapi.com/clients/refresh",
+ "keysets_metadata_path": "https://core.uidapi.com/key/keyset/refresh",
+ "keyset_keys_metadata_path": "https://core.uidapi.com/key/keyset-keys/refresh",
+ "client_side_keypairs_metadata_path": "https://core.uidapi.com/client_side_keypairs/refresh",
+ "salts_metadata_path": "https://core.uidapi.com/salt/refresh",
+ "services_metadata_path": "https://core.uidapi.com/services/refresh",
+ "service_links_metadata_path": "https://core.uidapi.com/service_links/refresh",
+ "optout_metadata_path": "https://optout.uidapi.com/optout/refresh",
+ "core_attest_url": "https://core.uidapi.com/attest",
+ "cloud_encryption_keys_metadata_path": "https://core.uidapi.com/cloud_encryption_keys/retrieve",
+ "optout_api_uri": "https://optout.uidapi.com/optout/replicate",
+ "optout_s3_folder": "optout-v2/",
+ "identity_token_expires_after_seconds": 259200,
+ "runtime_config_store": {
+ "type": "http",
+ "config" : {
+ "url": "https://core.uidapi.com/operator/config"
+ },
+ "config_scan_period_ms": 300000
+ }
+}
diff --git a/scripts/gcp-oidc/entrypoint.sh b/scripts/gcp-oidc/entrypoint.sh
deleted file mode 100644
index 133b54486..000000000
--- a/scripts/gcp-oidc/entrypoint.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-#
-# This script must be compatible with Ash (provided in eclipse-temurin Docker image) and Bash
-
-# -- set API tokens
-if [ -z "${API_TOKEN_SECRET_NAME}" ]; then
- echo "API_TOKEN_SECRET_NAME cannot be empty"
- exit 1
-fi
-
-if [ -z "${CORE_BASE_URL}" ]; then
- echo "CORE_BASE_URL cannot be empty"
- exit 1
-fi
-
-if [ -z "${OPTOUT_BASE_URL}" ]; then
- echo "OPTOUT_BASE_URL cannot be empty"
- exit 1
-fi
-
-export gcp_secret_version_name="${API_TOKEN_SECRET_NAME}"
-
-# -- locate config file
-if [ -z "${DEPLOYMENT_ENVIRONMENT}" ]; then
- echo "DEPLOYMENT_ENVIRONMENT cannot be empty"
- exit 1
-fi
-if [ "${DEPLOYMENT_ENVIRONMENT}" != 'prod' -a "${DEPLOYMENT_ENVIRONMENT}" != 'integ' ]; then
- echo "Unrecognized DEPLOYMENT_ENVIRONMENT ${DEPLOYMENT_ENVIRONMENT}"
- exit 1
-fi
-
-TARGET_CONFIG="/app/conf/${DEPLOYMENT_ENVIRONMENT}-uid2-config.json"
-if [ ! -f "${TARGET_CONFIG}" ]; then
- echo "Unrecognized config ${TARGET_CONFIG}"
- exit 1
-fi
-
-FINAL_CONFIG="/tmp/final-config.json"
-echo "-- copying ${TARGET_CONFIG} to ${FINAL_CONFIG}"
-cp ${TARGET_CONFIG} ${FINAL_CONFIG}
-if [ $? -ne 0 ]; then
- echo "Failed to create ${FINAL_CONFIG} with error code $?"
- exit 1
-fi
-
-# -- using hardcoded domains is fine because they should not be changed frequently
-echo "-- replacing URLs by ${CORE_BASE_URL} and ${OPTOUT_BASE_URL}"
-sed -i "s#https://core.uidapi.com#${CORE_BASE_URL}#g" ${FINAL_CONFIG}
-
-sed -i "s#https://optout.uidapi.com#${OPTOUT_BASE_URL}#g" ${FINAL_CONFIG}
-
-
-cat $FINAL_CONFIG
-
-# -- start operator
-echo "-- starting java application"
-java \
- -XX:MaxRAMPercentage=95 -XX:-UseCompressedOops -XX:+PrintFlagsFinal \
- -Djava.security.egd=file:/dev/./urandom \
- -Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory \
- -Dlogback.configurationFile=/app/conf/logback.xml \
- -Dvertx-config-path=${FINAL_CONFIG} \
- -jar ${JAR_NAME}-${JAR_VERSION}.jar
diff --git a/scripts/gcp-oidc/gcp.py b/scripts/gcp-oidc/gcp.py
new file mode 100644
index 000000000..ce91c9f32
--- /dev/null
+++ b/scripts/gcp-oidc/gcp.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python3
+
+import os
+import shutil
+from typing import Dict
+import sys
+import logging
+from google.cloud import secretmanager
+from google.auth.exceptions import DefaultCredentialsError
+from google.api_core.exceptions import PermissionDenied, NotFound
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from confidential_compute import ConfidentialCompute, ConfidentialComputeConfig, ConfigurationMissingError, OperatorKeyNotFoundError, OperatorKeyPermissionError, ConfidentialComputeStartupError
+
+class GCPEntryPoint(ConfidentialCompute):
+
+ def __init__(self):
+ super().__init__()
+
+ def _set_confidential_config(self, secret_identifier=None) -> None:
+
+ keys_mapping = {
+ "core_base_url": "CORE_BASE_URL",
+ "optout_base_url": "OPTOUT_BASE_URL",
+ "environment": "DEPLOYMENT_ENVIRONMENT",
+ "skip_validations": "SKIP_VALIDATIONS",
+ "debug_mode": "DEBUG_MODE",
+ }
+ self.configs = {
+ key: (os.environ[env_var].lower() == "true" if key in ["skip_validations", "debug_mode"] else os.environ[env_var])
+ for key, env_var in keys_mapping.items() if env_var in os.environ
+ }
+
+ if not os.getenv("API_TOKEN_SECRET_NAME"):
+ raise ConfigurationMissingError(self.__class__.__name__, ["API_TOKEN_SECRET_NAME"])
+ try:
+ client = secretmanager.SecretManagerServiceClient()
+            secret_version_name = f"{os.getenv('API_TOKEN_SECRET_NAME')}"
+ response = client.access_secret_version(name=secret_version_name)
+ secret_value = response.payload.data.decode("UTF-8")
+ except (PermissionDenied, DefaultCredentialsError) as e:
+ raise OperatorKeyPermissionError(self.__class__.__name__, str(e))
+ except NotFound:
+            raise OperatorKeyNotFoundError(self.__class__.__name__, f"Secret Manager {os.getenv('API_TOKEN_SECRET_NAME')}")
+ self.configs["operator_key"] = secret_value
+
+ def __populate_operator_config(self, destination):
+        target_config = f"/app/conf/{self.configs['environment'].lower()}-config.json"
+ shutil.copy(target_config, destination)
+ with open(destination, 'r') as file:
+ config = file.read()
+ config = config.replace("https://core.uidapi.com", self.configs.get("core_base_url"))
+ config = config.replace("https://optout.uidapi.com", self.configs.get("optout_base_url"))
+ with open(destination, 'w') as file:
+ file.write(config)
+
+ def _setup_auxiliaries(self) -> None:
+        """ No Auxiliary service required for GCP Confidential compute. """
+ pass
+
+ def _validate_auxiliaries(self) -> None:
+        """ No Auxiliary service required for GCP Confidential compute. """
+ pass
+
+ def run_compute(self) -> None:
+ self._set_confidential_config()
+ logging.info("Fetched configs")
+ if not self.configs.get("skip_validations"):
+ self.validate_configuration()
+        config_location = "/tmp/final-config.json"
+        self.__populate_operator_config(config_location)
+        os.environ["gcp_secret_version_name"] = os.getenv("API_TOKEN_SECRET_NAME")
+        java_command = [
+            "java",
+            "-XX:MaxRAMPercentage=95",
+            "-XX:-UseCompressedOops",
+            "-XX:+PrintFlagsFinal",
+            "-Djava.security.egd=file:/dev/./urandom",
+            "-Dvertx.logger-delegate-factory-class-name=io.vertx.core.logging.SLF4JLogDelegateFactory",
+            "-Dlogback.configurationFile=/app/conf/logback.xml",
+            f"-Dvertx-config-path={config_location}",
+ "-jar",
+            f"{os.getenv('JAR_NAME')}-{os.getenv('JAR_VERSION')}.jar"
+ ]
+ self.run_command(java_command)
+
+if __name__ == "__main__":
+ try:
+ gcp = GCPEntryPoint()
+ gcp.run_compute()
+ except ConfidentialComputeStartupError as e:
+        logging.error(f"Failed starting up Confidential Compute. Please check the logs for errors and retry {e}")
+ except Exception as e:
+ logging.error(f"Unexpected failure while starting up Confidential Compute. Please contact UID support team with this log {e}")
+
diff --git a/scripts/gcp-oidc/terraform/main.tf b/scripts/gcp-oidc/terraform/main.tf
index aefb68362..3a600e26e 100644
--- a/scripts/gcp-oidc/terraform/main.tf
+++ b/scripts/gcp-oidc/terraform/main.tf
@@ -106,6 +106,7 @@ resource "google_compute_instance_template" "uid_operator" {
tee-image-reference = var.uid_operator_image
tee-container-log-redirect = true
tee-restart-policy = "Never"
+ tee-env-DEBUG_MODE = var.debug_mode
tee-env-DEPLOYMENT_ENVIRONMENT = var.uid_deployment_env
tee-env-API_TOKEN_SECRET_NAME = module.secret-manager.secret_versions[0]
tee-env-CORE_BASE_URL = var.uid_deployment_env == "integ" ? "https://core-integ.uidapi.com" : "https://core-prod.uidapi.com"
diff --git a/src/main/java/com/uid2/operator/Const.java b/src/main/java/com/uid2/operator/Const.java
index 4d32b9034..d2ed93afd 100644
--- a/src/main/java/com/uid2/operator/Const.java
+++ b/src/main/java/com/uid2/operator/Const.java
@@ -20,6 +20,7 @@ public class Config extends com.uid2.shared.Const.Config {
public static final String ValidateServiceLinks = "validate_service_links";
public static final String OperatorTypeProp = "operator_type";
public static final String EnclavePlatformProp = "enclave_platform";
+ public static final String EncryptedFiles = "encrypted_files";
public static final String AzureVaultNameProp = "azure_vault_name";
public static final String AzureSecretNameProp = "azure_secret_name";
@@ -29,5 +30,10 @@ public class Config extends com.uid2.shared.Const.Config {
public static final String OptOutStatusMaxRequestSize = "optout_status_max_request_size";
public static final String MaxInvalidPaths = "logging_limit_max_invalid_paths_per_interval";
public static final String MaxVersionBucketsPerSite = "logging_limit_max_version_buckets_per_site";
+
+ public static final String ConfigScanPeriodMsProp = "config_scan_period_ms";
+ public static final String IdentityV3Prop = "identity_v3";
+ public static final String DisableOptoutTokenProp = "disable_optout_token";
+ public static final String EnableRemoteConfigProp = "enable_remote_config";
}
}
diff --git a/src/main/java/com/uid2/operator/Main.java b/src/main/java/com/uid2/operator/Main.java
index dad32611d..d1a66856d 100644
--- a/src/main/java/com/uid2/operator/Main.java
+++ b/src/main/java/com/uid2/operator/Main.java
@@ -8,8 +8,8 @@
import com.uid2.operator.monitoring.IStatsCollectorQueue;
import com.uid2.operator.monitoring.OperatorMetrics;
import com.uid2.operator.monitoring.StatsCollectorVerticle;
-import com.uid2.operator.service.SecureLinkValidatorService;
-import com.uid2.operator.service.ShutdownService;
+import com.uid2.operator.reader.RotatingCloudEncryptionKeyApiProvider;
+import com.uid2.operator.service.*;
import com.uid2.operator.vertx.Endpoints;
import com.uid2.operator.vertx.OperatorShutdownHandler;
import com.uid2.operator.store.CloudSyncOptOutStore;
@@ -22,9 +22,11 @@
import com.uid2.shared.jmx.AdminApi;
import com.uid2.shared.optout.OptOutCloudSync;
import com.uid2.shared.store.CloudPath;
+import com.uid2.shared.store.EncryptedRotatingSaltProvider;
import com.uid2.shared.store.RotatingSaltProvider;
import com.uid2.shared.store.reader.*;
import com.uid2.shared.store.scope.GlobalScope;
+import com.uid2.shared.util.HTTPPathMetricFilter;
import com.uid2.shared.vertx.CloudSyncVerticle;
import com.uid2.shared.vertx.ICloudSync;
import com.uid2.shared.vertx.RotatingStoreVerticle;
@@ -37,9 +39,9 @@
import io.micrometer.core.instrument.distribution.DistributionStatisticConfig;
import io.micrometer.prometheus.PrometheusMeterRegistry;
import io.micrometer.prometheus.PrometheusRenameFilter;
+import io.vertx.config.ConfigRetriever;
import io.vertx.core.*;
import io.vertx.core.http.HttpServerOptions;
-import io.vertx.core.http.impl.HttpUtils;
import io.vertx.core.json.JsonObject;
import io.vertx.micrometer.*;
import io.vertx.micrometer.backends.BackendRegistries;
@@ -57,6 +59,8 @@
import java.util.*;
import java.util.function.Supplier;
+import static com.uid2.operator.Const.Config.ConfigScanPeriodMsProp;
+import static com.uid2.operator.Const.Config.EnableRemoteConfigProp;
import static io.micrometer.core.instrument.Metrics.globalRegistry;
public class Main {
@@ -74,6 +78,7 @@ public class Main {
private final RotatingClientSideKeypairStore clientSideKeypairProvider;
private final RotatingSaltProvider saltProvider;
private final CloudSyncOptOutStore optOutStore;
+ private final boolean encryptedCloudFilesEnabled;
private OperatorShutdownHandler shutdownHandler = null;
private final OperatorMetrics metrics;
private final boolean clientSideTokenGenerate;
@@ -81,6 +86,7 @@ public class Main {
private IStatsCollectorQueue _statsCollectorQueue;
private RotatingServiceStore serviceProvider;
private RotatingServiceLinkStore serviceLinkProvider;
+ private RotatingCloudEncryptionKeyApiProvider cloudEncryptionKeyProvider;
public Main(Vertx vertx, JsonObject config) throws Exception {
this.vertx = vertx;
@@ -98,6 +104,7 @@ public Main(Vertx vertx, JsonObject config) throws Exception {
boolean useStorageMock = config.getBoolean(Const.Config.StorageMockProp, false);
this.clientSideTokenGenerate = config.getBoolean(Const.Config.EnableClientSideTokenGenerate, false);
this.validateServiceLinks = config.getBoolean(Const.Config.ValidateServiceLinks, false);
+ this.encryptedCloudFilesEnabled = config.getBoolean(Const.Config.EncryptedFiles, false);
this.shutdownHandler = new OperatorShutdownHandler(Duration.ofHours(12), Duration.ofHours(config.getInteger(Const.Config.SaltsExpiredShutdownHours, 12)), Clock.systemUTC(), new ShutdownService());
String coreAttestUrl = this.config.getString(Const.Config.CoreAttestUrlProp);
@@ -132,17 +139,46 @@ public Main(Vertx vertx, JsonObject config) throws Exception {
this.fsOptOut = configureCloudOptOutStore();
}
- String sitesMdPath = this.config.getString(Const.Config.SitesMetadataPathProp);
- String keypairMdPath = this.config.getString(Const.Config.ClientSideKeypairsMetadataPathProp);
- this.clientSideKeypairProvider = new RotatingClientSideKeypairStore(fsStores, new GlobalScope(new CloudPath(keypairMdPath)));
- String clientsMdPath = this.config.getString(Const.Config.ClientsMetadataPathProp);
- this.clientKeyProvider = new RotatingClientKeyProvider(fsStores, new GlobalScope(new CloudPath(clientsMdPath)));
- String keysetKeysMdPath = this.config.getString(Const.Config.KeysetKeysMetadataPathProp);
- this.keysetKeyStore = new RotatingKeysetKeyStore(fsStores, new GlobalScope(new CloudPath(keysetKeysMdPath)));
- String keysetMdPath = this.config.getString(Const.Config.KeysetsMetadataPathProp);
- this.keysetProvider = new RotatingKeysetProvider(fsStores, new GlobalScope(new CloudPath(keysetMdPath)));
- String saltsMdPath = this.config.getString(Const.Config.SaltsMetadataPathProp);
- this.saltProvider = new RotatingSaltProvider(fsStores, saltsMdPath);
+ if (this.encryptedCloudFilesEnabled) {
+ String cloudEncryptionKeyMdPath = this.config.getString(Const.Config.CloudEncryptionKeysMetadataPathProp);
+ this.cloudEncryptionKeyProvider = new RotatingCloudEncryptionKeyApiProvider(fsStores,
+ new GlobalScope(new CloudPath(cloudEncryptionKeyMdPath)));
+
+ String keypairMdPath = this.config.getString(Const.Config.ClientSideKeypairsMetadataPathProp);
+ this.clientSideKeypairProvider = new RotatingClientSideKeypairStore(fsStores,
+ new GlobalScope(new CloudPath(keypairMdPath)), cloudEncryptionKeyProvider);
+ String clientsMdPath = this.config.getString(Const.Config.ClientsMetadataPathProp);
+ this.clientKeyProvider = new RotatingClientKeyProvider(fsStores, new GlobalScope(new CloudPath(clientsMdPath)),
+ cloudEncryptionKeyProvider);
+ String keysetKeysMdPath = this.config.getString(Const.Config.KeysetKeysMetadataPathProp);
+ this.keysetKeyStore = new RotatingKeysetKeyStore(fsStores, new GlobalScope(new CloudPath(keysetKeysMdPath)),
+ cloudEncryptionKeyProvider);
+ String keysetMdPath = this.config.getString(Const.Config.KeysetsMetadataPathProp);
+ this.keysetProvider = new RotatingKeysetProvider(fsStores, new GlobalScope(new CloudPath(keysetMdPath)),
+ cloudEncryptionKeyProvider);
+ String saltsMdPath = this.config.getString(Const.Config.SaltsMetadataPathProp);
+ this.saltProvider = new EncryptedRotatingSaltProvider(fsStores, cloudEncryptionKeyProvider,
+ new GlobalScope(new CloudPath(saltsMdPath)));
+ String sitesMdPath = this.config.getString(Const.Config.SitesMetadataPathProp);
+ this.siteProvider = clientSideTokenGenerate
+ ? new RotatingSiteStore(fsStores, new GlobalScope(new CloudPath(sitesMdPath)),
+ cloudEncryptionKeyProvider)
+ : null;
+ } else {
+ String keypairMdPath = this.config.getString(Const.Config.ClientSideKeypairsMetadataPathProp);
+ this.clientSideKeypairProvider = new RotatingClientSideKeypairStore(fsStores, new GlobalScope(new CloudPath(keypairMdPath)));
+ String clientsMdPath = this.config.getString(Const.Config.ClientsMetadataPathProp);
+ this.clientKeyProvider = new RotatingClientKeyProvider(fsStores, new GlobalScope(new CloudPath(clientsMdPath)));
+ String keysetKeysMdPath = this.config.getString(Const.Config.KeysetKeysMetadataPathProp);
+ this.keysetKeyStore = new RotatingKeysetKeyStore(fsStores, new GlobalScope(new CloudPath(keysetKeysMdPath)));
+ String keysetMdPath = this.config.getString(Const.Config.KeysetsMetadataPathProp);
+ this.keysetProvider = new RotatingKeysetProvider(fsStores, new GlobalScope(new CloudPath(keysetMdPath)));
+ String saltsMdPath = this.config.getString(Const.Config.SaltsMetadataPathProp);
+ this.saltProvider = new RotatingSaltProvider(fsStores, saltsMdPath);
+ String sitesMdPath = this.config.getString(Const.Config.SitesMetadataPathProp);
+ this.siteProvider = clientSideTokenGenerate ? new RotatingSiteStore(fsStores, new GlobalScope(new CloudPath(sitesMdPath))) : null;
+ }
+
this.optOutStore = new CloudSyncOptOutStore(vertx, fsLocal, this.config, operatorKey, Clock.systemUTC());
if (this.validateServiceLinks) {
@@ -152,23 +188,26 @@ public Main(Vertx vertx, JsonObject config) throws Exception {
this.serviceLinkProvider = new RotatingServiceLinkStore(fsStores, new GlobalScope(new CloudPath(serviceLinkMdPath)));
}
- this.siteProvider = clientSideTokenGenerate ? new RotatingSiteStore(fsStores, new GlobalScope(new CloudPath(sitesMdPath))) : null;
-
if (useStorageMock && coreAttestUrl == null) {
if (clientSideTokenGenerate) {
this.siteProvider.loadContent();
this.clientSideKeypairProvider.loadContent();
}
- this.clientKeyProvider.loadContent();
- this.saltProvider.loadContent();
- this.keysetProvider.loadContent();
- this.keysetKeyStore.loadContent();
if (this.validateServiceLinks) {
this.serviceProvider.loadContent();
this.serviceLinkProvider.loadContent();
}
+ if (this.encryptedCloudFilesEnabled) {
+ this.cloudEncryptionKeyProvider.loadContent();
+ }
+
+ this.clientKeyProvider.loadContent();
+ this.saltProvider.loadContent();
+ this.keysetProvider.loadContent();
+ this.keysetKeyStore.loadContent();
+
try {
getKeyManager().getMasterKey();
} catch (KeyManager.NoActiveKeyException e) {
@@ -203,7 +242,9 @@ else if (!Utils.isProductionEnvironment()) {
}
Vertx vertx = createVertx();
- VertxUtils.createConfigRetriever(vertx).getConfig(ar -> {
+ ConfigRetriever configRetriever = VertxUtils.createConfigRetriever(vertx);
+
+ configRetriever.getConfig(ar -> {
if (ar.failed()) {
LOGGER.error("Unable to read config: " + ar.cause().getMessage(), ar.cause());
return;
@@ -264,40 +305,78 @@ private ICloudStorage wrapCloudStorageForOptOut(ICloudStorage cloudStorage) {
}
}
+ private Future initialiseConfigService() throws Exception {
+ boolean enableRemoteConfigFeatureFlag = config.getBoolean(EnableRemoteConfigProp, false);
+ ConfigRetriever configRetriever;
+
+ if (enableRemoteConfigFeatureFlag) {
+ configRetriever = ConfigRetrieverFactory.create(
+ vertx,
+ config.getJsonObject("runtime_config_store"),
+ this.createOperatorKeyRetriever().retrieve()
+ );
+ } else {
+ configRetriever = ConfigRetrieverFactory.create(
+ vertx,
+ new JsonObject()
+ .put("type", "json")
+ .put("config", config)
+ .put(ConfigScanPeriodMsProp, -1),
+ ""
+ );
+ }
+
+ return ConfigService.create(configRetriever)
+ .map(configService -> (IConfigService) configService)
+ .onFailure(e -> {
+ LOGGER.error("Failed to initialise ConfigService", e);
+ });
+ }
+
private void run() throws Exception {
- Supplier operatorVerticleSupplier = () -> {
- UIDOperatorVerticle verticle = new UIDOperatorVerticle(config, this.clientSideTokenGenerate, siteProvider, clientKeyProvider, clientSideKeypairProvider, getKeyManager(), saltProvider, optOutStore, Clock.systemUTC(), _statsCollectorQueue, new SecureLinkValidatorService(this.serviceLinkProvider, this.serviceProvider), this.shutdownHandler::handleSaltRetrievalResponse);
- return verticle;
- };
+ this.createVertxInstancesMetric();
+ this.createVertxEventLoopsMetric();
- DeploymentOptions options = new DeploymentOptions();
- int svcInstances = this.config.getInteger(Const.Config.ServiceInstancesProp);
- options.setInstances(svcInstances);
+ this.initialiseConfigService()
+ .compose(configService -> {
- Promise compositePromise = Promise.promise();
- List fs = new ArrayList<>();
- fs.add(createAndDeployStatsCollector());
- fs.add(createStoreVerticles());
+ Supplier operatorVerticleSupplier = () -> {
+ UIDOperatorVerticle verticle = new UIDOperatorVerticle(configService, config, this.clientSideTokenGenerate, siteProvider, clientKeyProvider, clientSideKeypairProvider, getKeyManager(), saltProvider, optOutStore, Clock.systemUTC(), _statsCollectorQueue, new SecureLinkValidatorService(this.serviceLinkProvider, this.serviceProvider), this.shutdownHandler::handleSaltRetrievalResponse);
+ return verticle;
+ };
- CompositeFuture.all(fs).onComplete(ar -> {
- if (ar.failed()) compositePromise.fail(new Exception(ar.cause()));
- else compositePromise.complete();
- });
+ DeploymentOptions options = new DeploymentOptions();
+ int svcInstances = this.config.getInteger(Const.Config.ServiceInstancesProp);
+ options.setInstances(svcInstances);
- compositePromise.future()
- .compose(v -> {
- metrics.setup();
- vertx.setPeriodic(60000, id -> metrics.update());
-
- Promise promise = Promise.promise();
- vertx.deployVerticle(operatorVerticleSupplier, options, promise);
- return promise.future();
- })
- .onFailure(t -> {
- LOGGER.error("Failed to bootstrap operator: " + t.getMessage(), new Exception(t));
- vertx.close();
- System.exit(1);
- });
+ Promise compositePromise = Promise.promise();
+ List fs = new ArrayList<>();
+ fs.add(createAndDeployStatsCollector());
+ try {
+ fs.add(createStoreVerticles());
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ CompositeFuture.all(fs).onComplete(ar -> {
+ if (ar.failed()) compositePromise.fail(new Exception(ar.cause()));
+ else compositePromise.complete();
+ });
+
+ return compositePromise.future()
+ .compose(v -> {
+ metrics.setup();
+ vertx.setPeriodic(60000, id -> metrics.update());
+ Promise promise = Promise.promise();
+ vertx.deployVerticle(operatorVerticleSupplier, options, promise);
+ return promise.future();
+ });
+ })
+ .onFailure(t -> {
+ LOGGER.error("Failed to bootstrap operator: " + t.getMessage(), new Exception(t));
+ vertx.close();
+ System.exit(1);
+ });
}
private Future createStoreVerticles() throws Exception {
@@ -306,16 +385,21 @@ private Future createStoreVerticles() throws Exception {
siteProvider.getMetadata();
clientSideKeypairProvider.getMetadata();
}
- clientKeyProvider.getMetadata();
- keysetKeyStore.getMetadata();
- keysetProvider.getMetadata();
- saltProvider.getMetadata();
if (validateServiceLinks) {
serviceProvider.getMetadata();
serviceLinkProvider.getMetadata();
}
+ if (encryptedCloudFilesEnabled) {
+ cloudEncryptionKeyProvider.getMetadata();
+ }
+
+ clientKeyProvider.getMetadata();
+ keysetKeyStore.getMetadata();
+ keysetProvider.getMetadata();
+ saltProvider.getMetadata();
+
// create cloud sync for optout store
OptOutCloudSync optOutCloudSync = new OptOutCloudSync(config, false);
this.optOutStore.registerCloudSync(optOutCloudSync);
@@ -323,10 +407,21 @@ private Future createStoreVerticles() throws Exception {
// create rotating store verticles to poll for updates
Promise promise = Promise.promise();
List fs = new ArrayList<>();
+
if (clientSideTokenGenerate) {
fs.add(createAndDeployRotatingStoreVerticle("site", siteProvider, "site_refresh_ms"));
fs.add(createAndDeployRotatingStoreVerticle("client_side_keypairs", clientSideKeypairProvider, "client_side_keypairs_refresh_ms"));
}
+
+ if (validateServiceLinks) {
+ fs.add(createAndDeployRotatingStoreVerticle("service", serviceProvider, "service_refresh_ms"));
+ fs.add(createAndDeployRotatingStoreVerticle("service_link", serviceLinkProvider, "service_link_refresh_ms"));
+ }
+
+ if (encryptedCloudFilesEnabled) {
+ fs.add(createAndDeployRotatingStoreVerticle("cloud_encryption_keys", cloudEncryptionKeyProvider, "cloud_encryption_keys_refresh_ms"));
+ }
+
fs.add(createAndDeployRotatingStoreVerticle("auth", clientKeyProvider, "auth_refresh_ms"));
fs.add(createAndDeployRotatingStoreVerticle("keyset", keysetProvider, "keyset_refresh_ms"));
fs.add(createAndDeployRotatingStoreVerticle("keysetkey", keysetKeyStore, "keysetkey_refresh_ms"));
@@ -337,10 +432,6 @@ private Future createStoreVerticles() throws Exception {
else promise.complete();
});
- if (validateServiceLinks) {
- fs.add(createAndDeployRotatingStoreVerticle("service", serviceProvider, "service_refresh_ms"));
- fs.add(createAndDeployRotatingStoreVerticle("service_link", serviceLinkProvider, "service_link_refresh_ms"));
- }
return promise.future();
}
@@ -414,7 +505,7 @@ private static Vertx createVertx() {
}
private static void setupMetrics(MicrometerMetricsOptions metricOptions) {
- BackendRegistries.setupBackend(metricOptions);
+ BackendRegistries.setupBackend(metricOptions, null);
MeterRegistry backendRegistry = BackendRegistries.getDefaultNow();
if (backendRegistry instanceof PrometheusMeterRegistry) {
@@ -425,14 +516,8 @@ private static void setupMetrics(MicrometerMetricsOptions metricOptions) {
prometheusRegistry.config()
// providing common renaming for prometheus metric, e.g. "hello.world" to "hello_world"
.meterFilter(new PrometheusRenameFilter())
- .meterFilter(MeterFilter.replaceTagValues(Label.HTTP_PATH.toString(), actualPath -> {
- try {
- String normalized = HttpUtils.normalizePath(actualPath).split("\\?")[0];
- return Endpoints.pathSet().contains(normalized) ? normalized : "/unknown";
- } catch (IllegalArgumentException e) {
- return actualPath;
- }
- }))
+ .meterFilter(MeterFilter.replaceTagValues(Label.HTTP_PATH.toString(),
+ actualPath -> HTTPPathMetricFilter.filterPath(actualPath, Endpoints.pathSet())))
// Don't record metrics for 404s.
.meterFilter(MeterFilter.deny(id ->
id.getName().startsWith(MetricsDomain.HTTP_SERVER.getPrefix()) &&
@@ -467,14 +552,26 @@ public DistributionStatisticConfig configure(Meter.Id id, DistributionStatisticC
.register(globalRegistry);
}
- private Map.Entry createUidClients(Vertx vertx, String attestationUrl, String clientApiToken, Handler> responseWatcher) throws Exception {
+ private void createVertxInstancesMetric() {
+ Gauge.builder("uid2.vertx_service_instances", () -> config.getInteger("service_instances"))
+ .description("gauge for number of vertx service instances requested")
+ .register(Metrics.globalRegistry);
+ }
+
+ private void createVertxEventLoopsMetric() {
+ Gauge.builder("uid2.vertx_event_loop_threads", () -> VertxOptions.DEFAULT_EVENT_LOOP_POOL_SIZE)
+ .description("gauge for number of vertx event loop threads")
+ .register(Metrics.globalRegistry);
+ }
+
+ private Map.Entry createUidClients(Vertx vertx, String attestationUrl, String clientApiToken, Handler> responseWatcher) throws Exception {
AttestationResponseHandler attestationResponseHandler = getAttestationTokenRetriever(vertx, attestationUrl, clientApiToken, responseWatcher);
- UidCoreClient coreClient = new UidCoreClient(clientApiToken, CloudUtils.defaultProxy, attestationResponseHandler);
+ UidCoreClient coreClient = new UidCoreClient(clientApiToken, CloudUtils.defaultProxy, attestationResponseHandler, this.encryptedCloudFilesEnabled);
UidOptOutClient optOutClient = new UidOptOutClient(clientApiToken, CloudUtils.defaultProxy, attestationResponseHandler);
return new AbstractMap.SimpleEntry<>(coreClient, optOutClient);
}
- private AttestationResponseHandler getAttestationTokenRetriever(Vertx vertx, String attestationUrl, String clientApiToken, Handler> responseWatcher) throws Exception {
+ private AttestationResponseHandler getAttestationTokenRetriever(Vertx vertx, String attestationUrl, String clientApiToken, Handler> responseWatcher) throws Exception {
String enclavePlatform = this.config.getString(Const.Config.EnclavePlatformProp);
String operatorType = this.config.getString(Const.Config.OperatorTypeProp, "");
diff --git a/src/main/java/com/uid2/operator/model/AdvertisingTokenInput.java b/src/main/java/com/uid2/operator/model/AdvertisingTokenInput.java
deleted file mode 100644
index b5ffcb89a..000000000
--- a/src/main/java/com/uid2/operator/model/AdvertisingTokenInput.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.uid2.operator.model;
-
-import java.time.Instant;
-
-import com.uid2.operator.model.userIdentity.RawUidIdentity;
-import com.uid2.shared.model.TokenVersion;
-
-public class AdvertisingTokenInput extends VersionedToken {
- public final OperatorIdentity operatorIdentity;
- public final SourcePublisher sourcePublisher;
- public final RawUidIdentity rawUidIdentity;
-
- public AdvertisingTokenInput(TokenVersion version, Instant createdAt, Instant expiresAt, OperatorIdentity operatorIdentity,
- SourcePublisher sourcePublisher, RawUidIdentity rawUidIdentity) {
- super(version, createdAt, expiresAt);
- this.operatorIdentity = operatorIdentity;
- this.sourcePublisher = sourcePublisher;
- this.rawUidIdentity = rawUidIdentity;
- }
-}
-
diff --git a/src/main/java/com/uid2/operator/model/AdvertisingTokenRequest.java b/src/main/java/com/uid2/operator/model/AdvertisingTokenRequest.java
new file mode 100644
index 000000000..d63fa66a8
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/AdvertisingTokenRequest.java
@@ -0,0 +1,28 @@
+package com.uid2.operator.model;
+
+import java.time.Instant;
+
+import com.uid2.operator.model.identities.RawUid;
+import com.uid2.operator.util.PrivacyBits;
+import com.uid2.shared.model.TokenVersion;
+
+// class containing enough information to create a new uid token (aka advertising token)
+public class AdvertisingTokenRequest extends VersionedTokenRequest {
+ public final OperatorIdentity operatorIdentity;
+ public final SourcePublisher sourcePublisher;
+ public final RawUid rawUid;
+ public final PrivacyBits privacyBits;
+ public final Instant establishedAt;
+
+ public AdvertisingTokenRequest(TokenVersion version, Instant createdAt, Instant expiresAt, OperatorIdentity operatorIdentity,
+ SourcePublisher sourcePublisher, RawUid rawUid, PrivacyBits privacyBits,
+ Instant establishedAt) {
+ super(version, createdAt, expiresAt);
+ this.operatorIdentity = operatorIdentity;
+ this.sourcePublisher = sourcePublisher;
+ this.rawUid = rawUid;
+ this.privacyBits = privacyBits;
+ this.establishedAt = establishedAt;
+ }
+}
+
diff --git a/src/main/java/com/uid2/operator/model/MapRequest.java b/src/main/java/com/uid2/operator/model/IdentityMapRequestItem.java
similarity index 57%
rename from src/main/java/com/uid2/operator/model/MapRequest.java
rename to src/main/java/com/uid2/operator/model/IdentityMapRequestItem.java
index 925296e44..079af8e76 100644
--- a/src/main/java/com/uid2/operator/model/MapRequest.java
+++ b/src/main/java/com/uid2/operator/model/IdentityMapRequestItem.java
@@ -1,20 +1,19 @@
package com.uid2.operator.model;
-import com.uid2.operator.model.userIdentity.HashedDiiIdentity;
+import com.uid2.operator.model.identities.HashedDii;
import java.time.Instant;
-public final class MapRequest {
- public final HashedDiiIdentity hashedDiiIdentity;
+public final class IdentityMapRequestItem {
+ public final HashedDii hashedDii;
public final OptoutCheckPolicy optoutCheckPolicy;
public final Instant asOf;
- public MapRequest(
- HashedDiiIdentity hashedDiiIdentity,
+ public IdentityMapRequestItem(
+ HashedDii hashedDii,
OptoutCheckPolicy optoutCheckPolicy,
- Instant asOf)
- {
- this.hashedDiiIdentity = hashedDiiIdentity;
+ Instant asOf) {
+ this.hashedDii = hashedDii;
this.optoutCheckPolicy = optoutCheckPolicy;
this.asOf = asOf;
}
diff --git a/src/main/java/com/uid2/operator/model/RawUidResponse.java b/src/main/java/com/uid2/operator/model/IdentityMapResponseItem.java
similarity index 69%
rename from src/main/java/com/uid2/operator/model/RawUidResponse.java
rename to src/main/java/com/uid2/operator/model/IdentityMapResponseItem.java
index 249bef4c5..909596a2f 100644
--- a/src/main/java/com/uid2/operator/model/RawUidResponse.java
+++ b/src/main/java/com/uid2/operator/model/IdentityMapResponseItem.java
@@ -1,13 +1,13 @@
package com.uid2.operator.model;
// Contains the computed raw UID and its bucket ID from identity/map request
-public class RawUidResponse {
- public static RawUidResponse OptoutIdentity = new RawUidResponse(new byte[33], "");
+public class IdentityMapResponseItem {
+ public static final IdentityMapResponseItem OptoutIdentity = new IdentityMapResponseItem(new byte[33], "");
// The raw UID is also known as Advertising Id (historically)
public final byte[] rawUid;
public final String bucketId;
- public RawUidResponse(byte[] rawUid, String bucketId) {
+ public IdentityMapResponseItem(byte[] rawUid, String bucketId) {
this.rawUid = rawUid;
this.bucketId = bucketId;
}
diff --git a/src/main/java/com/uid2/operator/model/IdentityRequest.java b/src/main/java/com/uid2/operator/model/IdentityRequest.java
deleted file mode 100644
index e9a0c96cb..000000000
--- a/src/main/java/com/uid2/operator/model/IdentityRequest.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package com.uid2.operator.model;
-
-import com.uid2.operator.model.userIdentity.HashedDiiIdentity;
-
-public final class IdentityRequest {
- public final SourcePublisher sourcePublisher;
- public final HashedDiiIdentity hashedDiiIdentity;
- public final OptoutCheckPolicy optoutCheckPolicy;
-
- public IdentityRequest(
- SourcePublisher sourcePublisher,
- HashedDiiIdentity hashedDiiIdentity,
- OptoutCheckPolicy tokenGeneratePolicy)
- {
- this.sourcePublisher = sourcePublisher;
- this.hashedDiiIdentity = hashedDiiIdentity;
- this.optoutCheckPolicy = tokenGeneratePolicy;
- }
-
- public boolean shouldCheckOptOut() {
- return optoutCheckPolicy.equals(OptoutCheckPolicy.RespectOptOut);
- }
-}
diff --git a/src/main/java/com/uid2/operator/model/IdentityResponse.java b/src/main/java/com/uid2/operator/model/IdentityResponse.java
deleted file mode 100644
index fc9182650..000000000
--- a/src/main/java/com/uid2/operator/model/IdentityResponse.java
+++ /dev/null
@@ -1,55 +0,0 @@
-package com.uid2.operator.model;
-
-import com.uid2.shared.model.TokenVersion;
-
-import java.time.Instant;
-
-// this defines all the fields for the response of the /token/generate and /client/generate endpoints before they are
-// jsonified
-public class IdentityResponse {
- public static IdentityResponse OptOutIdentityResponse = new IdentityResponse("", null, "", Instant.EPOCH, Instant.EPOCH, Instant.EPOCH);
- private final String advertisingToken;
- private final TokenVersion advertisingTokenVersion;
- private final String refreshToken;
- private final Instant identityExpires;
- private final Instant refreshExpires;
- private final Instant refreshFrom;
-
- public IdentityResponse(String advertisingToken, TokenVersion advertisingTokenVersion, String refreshToken,
- Instant identityExpires, Instant refreshExpires, Instant refreshFrom) {
- this.advertisingToken = advertisingToken;
- this.advertisingTokenVersion = advertisingTokenVersion;
- this.refreshToken = refreshToken;
- this.identityExpires = identityExpires;
- this.refreshExpires = refreshExpires;
- this.refreshFrom = refreshFrom;
- }
-
- public String getAdvertisingToken() {
- return advertisingToken;
- }
-
- public TokenVersion getAdvertisingTokenVersion() {
- return advertisingTokenVersion;
- }
-
- public String getRefreshToken() {
- return refreshToken;
- }
-
- public Instant getIdentityExpires() {
- return identityExpires;
- }
-
- public Instant getRefreshExpires() {
- return refreshExpires;
- }
-
- public Instant getRefreshFrom() {
- return refreshFrom;
- }
-
- public boolean isOptedOut() {
- return advertisingToken == null || advertisingToken.isEmpty();
- }
-}
diff --git a/src/main/java/com/uid2/operator/model/RefreshResponse.java b/src/main/java/com/uid2/operator/model/RefreshResponse.java
deleted file mode 100644
index 2a520fcc4..000000000
--- a/src/main/java/com/uid2/operator/model/RefreshResponse.java
+++ /dev/null
@@ -1,79 +0,0 @@
-package com.uid2.operator.model;
-
-import java.time.Duration;
-
-public class RefreshResponse {
-
- public static RefreshResponse Invalid = new RefreshResponse(Status.Invalid, IdentityResponse.OptOutIdentityResponse);
- public static RefreshResponse Optout = new RefreshResponse(Status.Optout, IdentityResponse.OptOutIdentityResponse);
- public static RefreshResponse Expired = new RefreshResponse(Status.Expired, IdentityResponse.OptOutIdentityResponse);
- public static RefreshResponse Deprecated = new RefreshResponse(Status.Deprecated, IdentityResponse.OptOutIdentityResponse);
- public static RefreshResponse NoActiveKey = new RefreshResponse(Status.NoActiveKey, IdentityResponse.OptOutIdentityResponse);
- private final Status status;
- private final IdentityResponse identityResponse;
- private final Duration durationSinceLastRefresh;
- private final boolean isCstg;
-
- private RefreshResponse(Status status, IdentityResponse identityResponse, Duration durationSinceLastRefresh, boolean isCstg) {
- this.status = status;
- this.identityResponse = identityResponse;
- this.durationSinceLastRefresh = durationSinceLastRefresh;
- this.isCstg = isCstg;
- }
-
- private RefreshResponse(Status status, IdentityResponse identityResponse) {
- this(status, identityResponse, null, false);
- }
-
- public static RefreshResponse createRefreshedResponse(IdentityResponse identityResponse, Duration durationSinceLastRefresh, boolean isCstg) {
- return new RefreshResponse(Status.Refreshed, identityResponse, durationSinceLastRefresh, isCstg);
- }
-
- public Status getStatus() {
- return status;
- }
-
- public IdentityResponse getIdentityResponse() {
- return identityResponse;
- }
-
- public Duration getDurationSinceLastRefresh() {
- return durationSinceLastRefresh;
- }
-
- public boolean isCstg() { return isCstg;}
-
- public boolean isRefreshed() {
- return Status.Refreshed.equals(this.status);
- }
-
- public boolean isOptOut() {
- return Status.Optout.equals(this.status);
- }
-
- public boolean isInvalidToken() {
- return Status.Invalid.equals(this.status);
- }
-
- public boolean isDeprecated() {
- return Status.Deprecated.equals(this.status);
- }
-
- public boolean isExpired() {
- return Status.Expired.equals(this.status);
- }
-
- public boolean noActiveKey() {
- return Status.NoActiveKey.equals(this.status);
- }
-
- public enum Status {
- Refreshed,
- Invalid,
- Optout,
- Expired,
- Deprecated,
- NoActiveKey
- }
-
-}
diff --git a/src/main/java/com/uid2/operator/model/RefreshTokenInput.java b/src/main/java/com/uid2/operator/model/RefreshTokenInput.java
deleted file mode 100644
index 15bef6a0c..000000000
--- a/src/main/java/com/uid2/operator/model/RefreshTokenInput.java
+++ /dev/null
@@ -1,20 +0,0 @@
-package com.uid2.operator.model;
-
-import java.time.Instant;
-
-import com.uid2.operator.model.userIdentity.FirstLevelHashIdentity;
-import com.uid2.shared.model.TokenVersion;
-
-public class RefreshTokenInput extends VersionedToken {
- public final OperatorIdentity operatorIdentity;
- public final SourcePublisher sourcePublisher;
- public final FirstLevelHashIdentity firstLevelHashIdentity;
-
- public RefreshTokenInput(TokenVersion version, Instant createdAt, Instant expiresAt, OperatorIdentity operatorIdentity,
- SourcePublisher sourcePublisher, FirstLevelHashIdentity firstLevelHashIdentity) {
- super(version, createdAt, expiresAt);
- this.operatorIdentity = operatorIdentity;
- this.sourcePublisher = sourcePublisher;
- this.firstLevelHashIdentity = firstLevelHashIdentity;
- }
-}
diff --git a/src/main/java/com/uid2/operator/model/SourcePublisher.java b/src/main/java/com/uid2/operator/model/SourcePublisher.java
index 4f13fd53e..bd19740a1 100644
--- a/src/main/java/com/uid2/operator/model/SourcePublisher.java
+++ b/src/main/java/com/uid2/operator/model/SourcePublisher.java
@@ -3,6 +3,10 @@
// The original publisher that requests to generate a UID token
public class SourcePublisher {
public final int siteId;
+
+ // these 2 values are added into the advertising/UID token and refresh token payloads but
+ // are not currently used for any real purpose, so they are sometimes set to 0;
+ // see the constructor below
public final int clientKeyId;
public final long publisherId;
@@ -11,4 +15,10 @@ public SourcePublisher(int siteId, int clientKeyId, long publisherId) {
this.clientKeyId = clientKeyId;
this.publisherId = publisherId;
}
+
+ public SourcePublisher(int siteId) {
+ this.siteId = siteId;
+ this.clientKeyId = 0;
+ this.publisherId = 0;
+ }
}
diff --git a/src/main/java/com/uid2/operator/model/TokenGenerateRequest.java b/src/main/java/com/uid2/operator/model/TokenGenerateRequest.java
new file mode 100644
index 000000000..39f3b56fc
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/TokenGenerateRequest.java
@@ -0,0 +1,40 @@
+package com.uid2.operator.model;
+
+import com.uid2.operator.model.identities.HashedDii;
+import com.uid2.operator.util.PrivacyBits;
+
+import java.time.Instant;
+
+public final class TokenGenerateRequest {
+ public final SourcePublisher sourcePublisher;
+ public final HashedDii hashedDii;
+ public final OptoutCheckPolicy optoutCheckPolicy;
+
+ public final PrivacyBits privacyBits;
+ public final Instant establishedAt;
+
+ public TokenGenerateRequest(
+ SourcePublisher sourcePublisher,
+ HashedDii hashedDii,
+ OptoutCheckPolicy tokenGeneratePolicy,
+ PrivacyBits privacyBits,
+ Instant establishedAt) {
+ this.sourcePublisher = sourcePublisher;
+ this.hashedDii = hashedDii;
+ this.optoutCheckPolicy = tokenGeneratePolicy;
+ this.privacyBits = privacyBits;
+ this.establishedAt = establishedAt;
+ }
+
+ public TokenGenerateRequest(
+ SourcePublisher sourcePublisher,
+ HashedDii hashedDii,
+ OptoutCheckPolicy tokenGeneratePolicy) {
+ this(sourcePublisher, hashedDii, tokenGeneratePolicy, PrivacyBits.DEFAULT, Instant.now());
+
+ }
+
+ public boolean shouldCheckOptOut() {
+ return optoutCheckPolicy.equals(OptoutCheckPolicy.RespectOptOut);
+ }
+}
diff --git a/src/main/java/com/uid2/operator/model/TokenGenerateResponse.java b/src/main/java/com/uid2/operator/model/TokenGenerateResponse.java
new file mode 100644
index 000000000..8c7d273f9
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/TokenGenerateResponse.java
@@ -0,0 +1,81 @@
+package com.uid2.operator.model;
+
+import com.uid2.shared.model.TokenVersion;
+import io.vertx.core.json.JsonObject;
+
+import java.time.Instant;
+
+// this defines all the fields for the response of the /token/generate and /client/generate endpoints before they are
+// jsonified
+// todo: can be converted to record later
+public class TokenGenerateResponse {
+ public static final TokenGenerateResponse OptOutResponse = new TokenGenerateResponse("", null, "", Instant.EPOCH, Instant.EPOCH, Instant.EPOCH);
+
+ //aka UID token
+ private final String advertisingToken;
+ private final TokenVersion advertisingTokenVersion;
+ private final String refreshToken;
+ // when the advertising token/uid token expires
+ private final Instant identityExpires;
+ private final Instant refreshExpires;
+ private final Instant refreshFrom;
+
+ public TokenGenerateResponse(String advertisingToken, TokenVersion advertisingTokenVersion, String refreshToken,
+ Instant identityExpires, Instant refreshExpires, Instant refreshFrom) {
+ this.advertisingToken = advertisingToken;
+ this.advertisingTokenVersion = advertisingTokenVersion;
+ this.refreshToken = refreshToken;
+ this.identityExpires = identityExpires;
+ this.refreshExpires = refreshExpires;
+ this.refreshFrom = refreshFrom;
+ }
+
+ public String getAdvertisingToken() {
+ return advertisingToken;
+ }
+
+ public TokenVersion getAdvertisingTokenVersion() {
+ return advertisingTokenVersion;
+ }
+
+ public String getRefreshToken() {
+ return refreshToken;
+ }
+
+ public Instant getIdentityExpires() {
+ return identityExpires;
+ }
+
+ public Instant getRefreshExpires() {
+ return refreshExpires;
+ }
+
+ public Instant getRefreshFrom() {
+ return refreshFrom;
+ }
+
+ public boolean isOptedOut() {
+ return advertisingToken == null || advertisingToken.isEmpty();
+ }
+
+ // for v1/v2 token/generate and token/refresh and client/generate (CSTG) endpoints
+ public JsonObject toJsonV1() {
+ final JsonObject json = new JsonObject();
+ json.put("advertising_token", getAdvertisingToken());
+ json.put("refresh_token", getRefreshToken());
+ json.put("identity_expires", getIdentityExpires().toEpochMilli());
+ json.put("refresh_expires", getRefreshExpires().toEpochMilli());
+ json.put("refresh_from", getRefreshFrom().toEpochMilli());
+ return json;
+ }
+
+ // for the original/legacy token/generate and token/refresh endpoint
+ public JsonObject toJsonV0() {
+ final JsonObject json = new JsonObject();
+ json.put("advertisement_token", getAdvertisingToken());
+ json.put("advertising_token", getAdvertisingToken());
+ json.put("refresh_token", getRefreshToken());
+
+ return json;
+ }
+}
diff --git a/src/main/java/com/uid2/operator/model/TokenRefreshRequest.java b/src/main/java/com/uid2/operator/model/TokenRefreshRequest.java
new file mode 100644
index 000000000..e2e51971a
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/TokenRefreshRequest.java
@@ -0,0 +1,26 @@
+package com.uid2.operator.model;
+
+import java.time.Instant;
+
+import com.uid2.operator.model.identities.FirstLevelHash;
+import com.uid2.operator.util.PrivacyBits;
+import com.uid2.shared.model.TokenVersion;
+
+// class containing enough data to create a new refresh token
+public class TokenRefreshRequest extends VersionedTokenRequest {
+ public final OperatorIdentity operatorIdentity;
+ public final SourcePublisher sourcePublisher;
+ public final FirstLevelHash firstLevelHash;
+ // by default, inherited from the previous refresh token's privacy bits
+ public final PrivacyBits privacyBits;
+
+
+ public TokenRefreshRequest(TokenVersion version, Instant createdAt, Instant expiresAt, OperatorIdentity operatorIdentity,
+ SourcePublisher sourcePublisher, FirstLevelHash firstLevelHash, PrivacyBits privacyBits) {
+ super(version, createdAt, expiresAt);
+ this.operatorIdentity = operatorIdentity;
+ this.sourcePublisher = sourcePublisher;
+ this.firstLevelHash = firstLevelHash;
+ this.privacyBits = privacyBits;
+ }
+}
diff --git a/src/main/java/com/uid2/operator/model/TokenRefreshResponse.java b/src/main/java/com/uid2/operator/model/TokenRefreshResponse.java
new file mode 100644
index 000000000..40e5d73c9
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/TokenRefreshResponse.java
@@ -0,0 +1,80 @@
+package com.uid2.operator.model;
+
+import java.time.Duration;
+
+public class TokenRefreshResponse {
+
+ public static final TokenRefreshResponse Invalid = new TokenRefreshResponse(Status.Invalid,
+ TokenGenerateResponse.OptOutResponse);
+ public static final TokenRefreshResponse Optout = new TokenRefreshResponse(Status.Optout, TokenGenerateResponse.OptOutResponse);
+ public static final TokenRefreshResponse Expired = new TokenRefreshResponse(Status.Expired, TokenGenerateResponse.OptOutResponse);
+ public static final TokenRefreshResponse Deprecated = new TokenRefreshResponse(Status.Deprecated, TokenGenerateResponse.OptOutResponse);
+ public static final TokenRefreshResponse NoActiveKey = new TokenRefreshResponse(Status.NoActiveKey, TokenGenerateResponse.OptOutResponse);
+ private final Status status;
+ private final TokenGenerateResponse tokenGenerateResponse;
+ private final Duration durationSinceLastRefresh;
+ private final boolean isCstg;
+
+ private TokenRefreshResponse(Status status, TokenGenerateResponse tokenGenerateResponse, Duration durationSinceLastRefresh, boolean isCstg) {
+ this.status = status;
+ this.tokenGenerateResponse = tokenGenerateResponse;
+ this.durationSinceLastRefresh = durationSinceLastRefresh;
+ this.isCstg = isCstg;
+ }
+
+ private TokenRefreshResponse(Status status, TokenGenerateResponse tokenGenerateResponse) {
+ this(status, tokenGenerateResponse, null, false);
+ }
+
+ public static TokenRefreshResponse createRefreshedResponse(TokenGenerateResponse tokenGenerateResponse, Duration durationSinceLastRefresh, boolean isCstg) {
+ return new TokenRefreshResponse(Status.Refreshed, tokenGenerateResponse, durationSinceLastRefresh, isCstg);
+ }
+
+ public Status getStatus() {
+ return status;
+ }
+
+ public TokenGenerateResponse getIdentityResponse() {
+ return tokenGenerateResponse;
+ }
+
+ public Duration getDurationSinceLastRefresh() {
+ return durationSinceLastRefresh;
+ }
+
+ public boolean isCstg() { return isCstg;}
+
+ public boolean isRefreshed() {
+ return Status.Refreshed.equals(this.status);
+ }
+
+ public boolean isOptOut() {
+ return Status.Optout.equals(this.status);
+ }
+
+ public boolean isInvalidToken() {
+ return Status.Invalid.equals(this.status);
+ }
+
+ public boolean isDeprecated() {
+ return Status.Deprecated.equals(this.status);
+ }
+
+ public boolean isExpired() {
+ return Status.Expired.equals(this.status);
+ }
+
+ public boolean noActiveKey() {
+ return Status.NoActiveKey.equals(this.status);
+ }
+
+ public enum Status {
+ Refreshed,
+ Invalid,
+ Optout,
+ Expired,
+ Deprecated,
+ NoActiveKey
+ }
+
+}
diff --git a/src/main/java/com/uid2/operator/model/VersionedToken.java b/src/main/java/com/uid2/operator/model/VersionedTokenRequest.java
similarity index 68%
rename from src/main/java/com/uid2/operator/model/VersionedToken.java
rename to src/main/java/com/uid2/operator/model/VersionedTokenRequest.java
index 5be86b80e..5cc9c5335 100644
--- a/src/main/java/com/uid2/operator/model/VersionedToken.java
+++ b/src/main/java/com/uid2/operator/model/VersionedTokenRequest.java
@@ -1,16 +1,16 @@
package com.uid2.operator.model;
import java.time.Instant;
-import java.util.Objects;
+
import com.uid2.shared.model.TokenVersion;
-public abstract class VersionedToken {
+public abstract class VersionedTokenRequest {
public final TokenVersion version;
public final Instant createdAt;
public final Instant expiresAt;
- public VersionedToken(TokenVersion version, Instant createdAt, Instant expiresAt) {
+ public VersionedTokenRequest(TokenVersion version, Instant createdAt, Instant expiresAt) {
this.version = version;
this.createdAt = createdAt;
this.expiresAt = expiresAt;
diff --git a/src/main/java/com/uid2/operator/model/IdentityType.java b/src/main/java/com/uid2/operator/model/identities/DiiType.java
similarity index 67%
rename from src/main/java/com/uid2/operator/model/IdentityType.java
rename to src/main/java/com/uid2/operator/model/identities/DiiType.java
index b64817df5..062b55d35 100644
--- a/src/main/java/com/uid2/operator/model/IdentityType.java
+++ b/src/main/java/com/uid2/operator/model/identities/DiiType.java
@@ -1,15 +1,15 @@
-package com.uid2.operator.model;
+package com.uid2.operator.model.identities;
import com.uid2.operator.vertx.ClientInputValidationException;
-public enum IdentityType {
+public enum DiiType {
Email(0), Phone(1);
public final int value;
- IdentityType(int value) { this.value = value; }
+ DiiType(int value) { this.value = value; }
- public static IdentityType fromValue(int value) {
+ public static DiiType fromValue(int value) {
switch (value) {
case 0: return Email;
case 1: return Phone;
diff --git a/src/main/java/com/uid2/operator/model/identities/FirstLevelHash.java b/src/main/java/com/uid2/operator/model/identities/FirstLevelHash.java
new file mode 100644
index 000000000..49b2728f4
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/identities/FirstLevelHash.java
@@ -0,0 +1,19 @@
+package com.uid2.operator.model.identities;
+
+import java.time.Instant;
+import java.util.Arrays;
+
+/**
+ * Contains a first level salted hash computed from Hashed DII (email/phone number)
+ * @param establishedAt for brand new token generation, it should be the time it is generated; if the first level hash is from a token/refresh call, it will be when the raw UID was originally created in the earliest token generation
+ */
+public record FirstLevelHash(IdentityScope identityScope, DiiType diiType, byte[] firstLevelHash,
+ Instant establishedAt) {
+
+ // explicitly not checking establishedAt - this is only for making sure the first level hash matches a new input
+ public boolean matches(FirstLevelHash that) {
+ return this.identityScope.equals(that.identityScope) &&
+ this.diiType.equals(that.diiType) &&
+ Arrays.equals(this.firstLevelHash, that.firstLevelHash);
+ }
+}
diff --git a/src/main/java/com/uid2/operator/model/identities/HashedDii.java b/src/main/java/com/uid2/operator/model/identities/HashedDii.java
new file mode 100644
index 000000000..64c7bbf0f
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/identities/HashedDii.java
@@ -0,0 +1,7 @@
+package com.uid2.operator.model.identities;
+
+// Contains a hashed Directly Identifying Information (DII) value (email or phone); see https://unifiedid.com/docs/ref-info/glossary-uid#gl-dii
+// This hash can either be computed from a raw email/phone number DII input or provided by the UID Participant directly
+//
+public record HashedDii(IdentityScope identityScope, DiiType diiType, byte[] hashedDii) {
+}
diff --git a/src/main/java/com/uid2/operator/IdentityConst.java b/src/main/java/com/uid2/operator/model/identities/IdentityConst.java
similarity index 82%
rename from src/main/java/com/uid2/operator/IdentityConst.java
rename to src/main/java/com/uid2/operator/model/identities/IdentityConst.java
index 9362ade6e..63fa62f96 100644
--- a/src/main/java/com/uid2/operator/IdentityConst.java
+++ b/src/main/java/com/uid2/operator/model/identities/IdentityConst.java
@@ -1,4 +1,4 @@
-package com.uid2.operator;
+package com.uid2.operator.model.identities;
import com.uid2.operator.service.EncodingUtils;
@@ -7,13 +7,13 @@ public class IdentityConst {
public static final String OptOutTokenIdentityForEmail = "optout@unifiedid.com";
public static final String OptOutTokenIdentityForPhone = "+00000000001";
- // DIIs for for testing with token/validate endpoint, see https://unifiedid.com/docs/endpoints/post-token-validate
+ // DIIs for testing with token/validate endpoint, see https://unifiedid.com/docs/endpoints/post-token-validate
public static final String ValidateIdentityForEmail = "validate@example.com";
public static final String ValidateIdentityForPhone = "+12345678901";
public static final byte[] ValidateIdentityForEmailHash = EncodingUtils.getSha256Bytes(IdentityConst.ValidateIdentityForEmail);
public static final byte[] ValidateIdentityForPhoneHash = EncodingUtils.getSha256Bytes(IdentityConst.ValidateIdentityForPhone);
- // DIIs to use when you want to generate a optout response in token generation or identity map
+ // DIIs to use when you want to generate an optout response in token generation or identity map
public static final String OptOutIdentityForEmail = "optout@example.com";
public static final String OptOutIdentityForPhone = "+00000000000";
diff --git a/src/main/java/com/uid2/operator/model/IdentityScope.java b/src/main/java/com/uid2/operator/model/identities/IdentityScope.java
similarity index 94%
rename from src/main/java/com/uid2/operator/model/IdentityScope.java
rename to src/main/java/com/uid2/operator/model/identities/IdentityScope.java
index 0bff1edc1..3dc19a764 100644
--- a/src/main/java/com/uid2/operator/model/IdentityScope.java
+++ b/src/main/java/com/uid2/operator/model/identities/IdentityScope.java
@@ -1,4 +1,4 @@
-package com.uid2.operator.model;
+package com.uid2.operator.model.identities;
import com.uid2.operator.vertx.ClientInputValidationException;
diff --git a/src/main/java/com/uid2/operator/model/identities/RawUid.java b/src/main/java/com/uid2/operator/model/identities/RawUid.java
new file mode 100644
index 000000000..4ae619d00
--- /dev/null
+++ b/src/main/java/com/uid2/operator/model/identities/RawUid.java
@@ -0,0 +1,13 @@
+package com.uid2.operator.model.identities;
+
+import java.util.Arrays;
+
+// Holds a raw UID (as bytes) together with the identity scope and DII type it was derived from
+public record RawUid(IdentityScope identityScope, DiiType diiType, byte[] rawUid) {
+
+ public boolean matches(RawUid that) {
+ return this.identityScope.equals(that.identityScope) &&
+ this.diiType.equals(that.diiType) &&
+ Arrays.equals(this.rawUid, that.rawUid);
+ }
+}
diff --git a/src/main/java/com/uid2/operator/model/userIdentity/FirstLevelHashIdentity.java b/src/main/java/com/uid2/operator/model/userIdentity/FirstLevelHashIdentity.java
deleted file mode 100644
index 64b8bcedd..000000000
--- a/src/main/java/com/uid2/operator/model/userIdentity/FirstLevelHashIdentity.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package com.uid2.operator.model.userIdentity;
-
-import com.uid2.operator.model.IdentityScope;
-import com.uid2.operator.model.IdentityType;
-
-import java.time.Instant;
-import java.util.Arrays;
-
-// Contains a first level salted hash computed from Hashed DII (email/phone number)
-public class FirstLevelHashIdentity extends UserIdentity {
- public final byte[] firstLevelHash;
-
- public FirstLevelHashIdentity(IdentityScope identityScope, IdentityType identityType, byte[] firstLevelHash, int privacyBits,
- Instant establishedAt, Instant refreshedAt) {
- super(identityScope, identityType, privacyBits, establishedAt, refreshedAt);
- this.firstLevelHash = firstLevelHash;
- }
-
- public boolean matches(FirstLevelHashIdentity that) {
- return this.identityScope.equals(that.identityScope) &&
- this.identityType.equals(that.identityType) &&
- Arrays.equals(this.firstLevelHash, that.firstLevelHash);
- }
-}
diff --git a/src/main/java/com/uid2/operator/model/userIdentity/HashedDiiIdentity.java b/src/main/java/com/uid2/operator/model/userIdentity/HashedDiiIdentity.java
deleted file mode 100644
index dad862f21..000000000
--- a/src/main/java/com/uid2/operator/model/userIdentity/HashedDiiIdentity.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package com.uid2.operator.model.userIdentity;
-
-import com.uid2.operator.model.IdentityScope;
-import com.uid2.operator.model.IdentityType;
-
-import java.time.Instant;
-
-// Contains a hash DII,
-// This hash can either be computed from a raw email/phone number DII input or provided by the UID Participant directly
-public class HashedDiiIdentity extends UserIdentity {
- public final byte[] hashedDii;
-
- public HashedDiiIdentity(IdentityScope identityScope, IdentityType identityType, byte[] hashedDii, int privacyBits,
- Instant establishedAt, Instant refreshedAt) {
- super(identityScope, identityType, privacyBits, establishedAt, refreshedAt);
- this.hashedDii = hashedDii;
- }
-}
diff --git a/src/main/java/com/uid2/operator/model/userIdentity/RawUidIdentity.java b/src/main/java/com/uid2/operator/model/userIdentity/RawUidIdentity.java
deleted file mode 100644
index 4e15c6ff0..000000000
--- a/src/main/java/com/uid2/operator/model/userIdentity/RawUidIdentity.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package com.uid2.operator.model.userIdentity;
-
-import com.uid2.operator.model.IdentityScope;
-import com.uid2.operator.model.IdentityType;
-
-import java.time.Instant;
-import java.util.Arrays;
-
-// A raw UID is stored inside
-public class RawUidIdentity extends UserIdentity {
- public final byte[] rawUid;
-
- public RawUidIdentity(IdentityScope identityScope, IdentityType identityType, byte[] rawUid, int privacyBits,
- Instant establishedAt, Instant refreshedAt) {
- super(identityScope, identityType, privacyBits, establishedAt, refreshedAt);
- this.rawUid = rawUid;
- }
-
- public boolean matches(RawUidIdentity that) {
- return this.identityScope.equals(that.identityScope) &&
- this.identityType.equals(that.identityType) &&
- Arrays.equals(this.rawUid, that.rawUid);
- }
-}
diff --git a/src/main/java/com/uid2/operator/model/userIdentity/UserIdentity.java b/src/main/java/com/uid2/operator/model/userIdentity/UserIdentity.java
deleted file mode 100644
index 1391b7d75..000000000
--- a/src/main/java/com/uid2/operator/model/userIdentity/UserIdentity.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package com.uid2.operator.model.userIdentity;
-
-import com.uid2.operator.model.IdentityScope;
-import com.uid2.operator.model.IdentityType;
-
-import java.time.Instant;
-
-//base class for all other HshedDii/FirstLevelHash/RawUIDIdentity class and define the basic common fields
-public abstract class UserIdentity {
-
- public final IdentityScope identityScope;
- public final IdentityType identityType;
- public final int privacyBits;
- public final Instant establishedAt;
- public final Instant refreshedAt;
-
- public UserIdentity(IdentityScope identityScope, IdentityType identityType, int privacyBits, Instant establishedAt, Instant refreshedAt) {
- this.identityScope = identityScope;
- this.identityType = identityType;
- this.privacyBits = privacyBits;
- this.establishedAt = establishedAt;
- this.refreshedAt = refreshedAt;
- }
-}
diff --git a/src/main/java/com/uid2/operator/monitoring/StatsCollectorHandler.java b/src/main/java/com/uid2/operator/monitoring/StatsCollectorHandler.java
index 04a36d9c1..ebeb304d5 100644
--- a/src/main/java/com/uid2/operator/monitoring/StatsCollectorHandler.java
+++ b/src/main/java/com/uid2/operator/monitoring/StatsCollectorHandler.java
@@ -20,7 +20,9 @@ public StatsCollectorHandler(IStatsCollectorQueue _statCollectorQueue, Vertx ver
@Override
public void handle(RoutingContext routingContext) {
- assert routingContext != null;
+ if (routingContext == null) {
+ throw new NullPointerException();
+ }
//setAuthClient() has not yet been called, so getAuthClient() would return null. This is resolved by using addBodyEndHandler()
routingContext.addBodyEndHandler(v -> addStatsMessageToQueue(routingContext));
diff --git a/src/main/java/com/uid2/operator/monitoring/StatsCollectorVerticle.java b/src/main/java/com/uid2/operator/monitoring/StatsCollectorVerticle.java
index 8e49ec1f8..77e179e77 100644
--- a/src/main/java/com/uid2/operator/monitoring/StatsCollectorVerticle.java
+++ b/src/main/java/com/uid2/operator/monitoring/StatsCollectorVerticle.java
@@ -88,8 +88,6 @@ public void handleMessage(Message message) {
return;
}
- assert messageItem != null;
-
String path = messageItem.getPath();
String apiVersion = "v0";
String endpoint = path.substring(1);
diff --git a/src/main/java/com/uid2/operator/monitoring/TokenResponseStatsCollector.java b/src/main/java/com/uid2/operator/monitoring/TokenResponseStatsCollector.java
index fc28bba70..c5f46cc7e 100644
--- a/src/main/java/com/uid2/operator/monitoring/TokenResponseStatsCollector.java
+++ b/src/main/java/com/uid2/operator/monitoring/TokenResponseStatsCollector.java
@@ -1,6 +1,6 @@
package com.uid2.operator.monitoring;
-import com.uid2.operator.model.RefreshResponse;
+import com.uid2.operator.model.TokenRefreshResponse;
import com.uid2.operator.vertx.UIDOperatorVerticle;
import com.uid2.shared.model.TokenVersion;
import com.uid2.shared.store.ISiteStore;
@@ -69,7 +69,7 @@ private static void recordInternal(ISiteStore siteStore, Integer siteId, Endpoin
builder.register(Metrics.globalRegistry).increment();
}
- public static void recordRefresh(ISiteStore siteStore, Integer siteId, Endpoint endpoint, RefreshResponse refreshResponse, PlatformType platformType) {
+ public static void recordRefresh(ISiteStore siteStore, Integer siteId, Endpoint endpoint, TokenRefreshResponse refreshResponse, PlatformType platformType) {
if (!refreshResponse.isRefreshed()) {
if (refreshResponse.isOptOut() || refreshResponse.isDeprecated()) {
recordInternal(siteStore, siteId, endpoint, ResponseStatus.OptOut, refreshResponse.getIdentityResponse().getAdvertisingTokenVersion(), refreshResponse.isCstg(), platformType);
diff --git a/src/main/java/com/uid2/operator/reader/ApiStoreReader.java b/src/main/java/com/uid2/operator/reader/ApiStoreReader.java
new file mode 100644
index 000000000..7b39c9a83
--- /dev/null
+++ b/src/main/java/com/uid2/operator/reader/ApiStoreReader.java
@@ -0,0 +1,58 @@
+package com.uid2.operator.reader;
+
+import com.uid2.shared.cloud.DownloadCloudStorage;
+import com.uid2.shared.store.ScopedStoreReader;
+import com.uid2.shared.store.parser.Parser;
+import com.uid2.shared.store.parser.ParsingResult;
+import com.uid2.shared.store.scope.StoreScope;
+import io.vertx.core.json.JsonArray;
+import io.vertx.core.json.JsonObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+
+public class ApiStoreReader extends ScopedStoreReader {
+ private static final Logger LOGGER = LoggerFactory.getLogger(ApiStoreReader.class);
+
+ public ApiStoreReader(DownloadCloudStorage fileStreamProvider, StoreScope scope, Parser parser, String dataTypeName) {
+ super(fileStreamProvider, scope, parser, dataTypeName);
+ }
+
+
+ public long loadContent(JsonObject contents) throws Exception {
+ return loadContent(contents, dataTypeName);
+ }
+
+ @Override
+ public long loadContent(JsonObject contents, String dataType) throws IOException {
+ if (contents == null) {
+ throw new IllegalArgumentException(String.format("No contents provided for loading data type %s, cannot load content", dataType));
+ }
+
+ try {
+ JsonArray dataArray = contents.getJsonArray(dataType);
+ if (dataArray == null) {
+ throw new IllegalArgumentException(String.format("No array of type: %s, found in the contents", dataType));
+ }
+
+ String jsonString = dataArray.toString();
+ InputStream inputStream = new ByteArrayInputStream(jsonString.getBytes(StandardCharsets.UTF_8));
+
+ ParsingResult parsed = parser.deserialize(inputStream);
+ latestSnapshot.set(parsed.getData());
+
+ final int count = parsed.getCount();
+ latestEntryCount.set(count);
+ LOGGER.info(String.format("Loaded %d %s", count, dataType));
+ return count;
+ } catch (Exception e) {
+ LOGGER.error(String.format("Unable to load %s", dataType));
+ throw e;
+ }
+ }
+}
+
diff --git a/src/main/java/com/uid2/operator/reader/RotatingCloudEncryptionKeyApiProvider.java b/src/main/java/com/uid2/operator/reader/RotatingCloudEncryptionKeyApiProvider.java
new file mode 100644
index 000000000..838bd8b0b
--- /dev/null
+++ b/src/main/java/com/uid2/operator/reader/RotatingCloudEncryptionKeyApiProvider.java
@@ -0,0 +1,34 @@
+package com.uid2.operator.reader;
+
+import com.uid2.shared.cloud.DownloadCloudStorage;
+import com.uid2.shared.model.CloudEncryptionKey;
+import com.uid2.shared.store.CloudPath;
+import com.uid2.shared.store.parser.CloudEncryptionKeyParser;
+import com.uid2.shared.store.reader.RotatingCloudEncryptionKeyProvider;
+import com.uid2.shared.store.scope.StoreScope;
+import io.vertx.core.json.JsonObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.time.Instant;
+import java.util.*;
+
+public class RotatingCloudEncryptionKeyApiProvider extends RotatingCloudEncryptionKeyProvider {
+ private static final Logger LOGGER = LoggerFactory.getLogger(RotatingCloudEncryptionKeyApiProvider.class);
+
+ public RotatingCloudEncryptionKeyApiProvider(DownloadCloudStorage fileStreamProvider, StoreScope scope) {
+ super(fileStreamProvider, scope, new ApiStoreReader<>(fileStreamProvider, scope, new CloudEncryptionKeyParser(), "cloud_encryption_keys"));
+ }
+
+ public RotatingCloudEncryptionKeyApiProvider(DownloadCloudStorage fileStreamProvider, StoreScope scope, ApiStoreReader