
Commit 04cb2e0

SV1_SCAN_BIGQUERY deployment var; allow BQ-only cleanup (#1050) [ci]
* SV1_SCAN_BIGQUERY env option
* remove bqdatastream cleanup, just leave the user alive

Revocations still not enough:

- gcp:sql:User sv-1-user-bqdatastream deleting (1s) error: sdk-v2/provider2.go:566: sdk.helper_schema: Error, failed to deleteuser bqdatastream in instance sv-1-cn-apps-pg-2964ec6: googleapi: Error 400: Invalid request: failed to delete user bqdatastream: . role "bqdatastream" cannot be dropped because some objects depend on it Details: 32 objects in database scan_sv_1., invalid: [email protected]

Signed-off-by: Stephen Compall <[email protected]>
1 parent 5c3016c commit 04cb2e0
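The `role "bqdatastream" cannot be dropped because some objects depend on it` failure quoted above is standard PostgreSQL behavior: DROP ROLE refuses while the role still owns objects or holds privileges in any database, and Cloud SQL surfaces that as the googleapi 400. As a sketch only (not part of this commit, and the target role `postgres` is illustrative), the manual path to actually dropping such a user would be to clear its dependencies first, in each database the error lists (here scan_sv_1):

-- Sketch only: run in each database that reports dependent objects.
REASSIGN OWNED BY bqdatastream TO postgres;  -- transfer ownership of its objects
DROP OWNED BY bqdatastream;                  -- revoke its remaining privileges
DROP ROLE bqdatastream;                      -- now permitted to succeed

Since that would tear down exactly the grants the Datastream integration needs, the commit instead marks the user retainOnDelete, so Pulumi forgets it without touching the database.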

2 files changed: +66 −30 lines changed

cluster/pulumi/canton-network/src/bigQuery.ts

Lines changed: 60 additions & 30 deletions
@@ -311,8 +311,9 @@ function createPostgresReplicatorUser(
   postgres: CloudPostgres,
   password: PostgresPassword
 ): gcp.sql.User {
+  const name = `${postgres.namespace.logicalName}-user-${replicatorUserName}`;
   return new gcp.sql.User(
-    `${postgres.namespace.logicalName}-user-${replicatorUserName}`,
+    name,
     {
       instance: postgres.databaseInstance.name,
       name: replicatorUserName,
@@ -321,12 +322,51 @@ function createPostgresReplicatorUser(
     {
       parent: postgres,
       deletedWith: postgres.databaseInstance,
+      retainOnDelete: true,
       protect: protectCloudSql,
       dependsOn: [postgres.databaseInstance, password.secret],
     }
   );
 }
 
+function databaseCommandBracket(postgres: CloudPostgres) {
+  return {
+    header: pulumi.interpolate`
+set -e
+TMP_BUCKET="da-cn-tmp-sql-$(date +%s)-$RANDOM"
+TMP_SQL_FILE="$(mktemp tmp_pub_rep_slots_XXXXXXXXXX.sql --tmpdir)"
+GCS_URI="gs://$TMP_BUCKET/$(basename "$TMP_SQL_FILE")"
+
+# create temporary bucket
+gsutil mb --pap enforced -p "${privateNetwork.project}" \
+  -l "${cloudsdkComputeRegion()}" "gs://$TMP_BUCKET"
+
+# grant DB service account access to the bucket
+gsutil iam ch "serviceAccount:${postgres.databaseInstance.serviceAccountEmailAddress}:roles/storage.objectAdmin" \
+  "gs://$TMP_BUCKET"
+
+cat > "$TMP_SQL_FILE" <<'EOT'
+`,
+    footer: pulumi.interpolate`
+EOT
+
+# upload SQL to temporary bucket
+gsutil cp "$TMP_SQL_FILE" "$GCS_URI"
+
+# then import into Cloud SQL
+gcloud sql import sql ${postgres.databaseInstance.name} "$GCS_URI" \
+  --database="${scanAppDatabaseName(postgres)}" \
+  --user="${postgres.user.name}" \
+  --quiet
+
+# cleanup: remove the file from GCS, delete the bucket, remove the local file
+gsutil rm "$GCS_URI"
+gsutil rb "gs://$TMP_BUCKET"
+rm "$TMP_SQL_FILE"
+`,
+  };
+}
+
 /*
 For the SQL below to apply, the user/operator applying the pulumi
 needs the 'Cloud SQL Editor' IAM role in the relevant GCP project
@@ -339,27 +379,15 @@ function createPublicationAndReplicationSlots(
 ) {
   const dbName = scanAppDatabaseName(postgres);
   const schemaName = dbName;
+  const { header, footer } = databaseCommandBracket(postgres);
   return new command.local.Command(
     `${postgres.namespace.logicalName}-${replicatorUserName}-pub-replicate-slots`,
     {
       // TODO (#19809) refactor to invoke external shell script
       // ----
       // from https://cloud.google.com/datastream/docs/configure-cloudsql-psql
       create: pulumi.interpolate`
-set -e
-TMP_BUCKET="da-cn-tmp-sql-$(date +%s)-$RANDOM"
-TMP_SQL_FILE="$(mktemp tmp_pub_rep_slots_XXXXXXXXXX.sql --tmpdir)"
-GCS_URI="gs://$TMP_BUCKET/$(basename "$TMP_SQL_FILE")"
-
-# create temporary bucket
-gsutil mb --pap enforced -p "${privateNetwork.project}" \
-  -l "${cloudsdkComputeRegion()}" "gs://$TMP_BUCKET"
-
-# grant DB service account access to the bucket
-gsutil iam ch "serviceAccount:${postgres.databaseInstance.serviceAccountEmailAddress}:roles/storage.objectAdmin" \
-  "gs://$TMP_BUCKET"
-
-cat > "$TMP_SQL_FILE" <<'EOT'
+${header}
 DO $$
 DECLARE
   migration_complete BOOLEAN := FALSE;
@@ -414,21 +442,23 @@ function createPublicationAndReplicationSlots(
 ALTER DEFAULT PRIVILEGES IN SCHEMA ${schemaName}
   GRANT SELECT ON TABLES TO ${replicatorUserName};
 COMMIT;
-EOT
-
-# upload SQL to temporary bucket
-gsutil cp "$TMP_SQL_FILE" "$GCS_URI"
-
-# then import into Cloud SQL
-gcloud sql import sql ${postgres.databaseInstance.name} "$GCS_URI" \
-  --database="${scanAppDatabaseName(postgres)}" \
-  --user="${postgres.user.name}" \
-  --quiet
-
-# cleanup: remove the file from GCS, delete the bucket, remove the local file
-gsutil rm "$GCS_URI"
-gsutil rb "gs://$TMP_BUCKET"
-rm "$TMP_SQL_FILE"
+${footer}
+`,
+      delete: pulumi.interpolate`
+${header}
+DO $$
+BEGIN
+  IF EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = '${replicationSlotName}') THEN
+    PERFORM PG_DROP_REPLICATION_SLOT('${replicationSlotName}');
+  END IF;
+END $$;
+DO $$
+BEGIN
+  IF EXISTS (SELECT 1 FROM pg_publication WHERE pubname = '${publicationName}') THEN
+    DROP PUBLICATION ${publicationName};
+  END IF;
+END $$;
+${footer}
 `,
     },
     {
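For context on the refactor above: databaseCommandBracket factors the temporary-bucket setup and teardown into a reusable header/footer pair, so the existing create script and the new delete script each supply only the SQL between the two. A minimal standalone sketch of the composition idiom (the names below are illustrative, not from the repo): pulumi.interpolate flattens nested Output values, so the bracket pieces splice around a plain SQL string lazily.

import * as pulumi from '@pulumi/pulumi';

// Illustrative stand-ins for the header/footer Outputs built above.
const header: pulumi.Output<string> = pulumi.output("cat > f.sql <<'EOT'\n");
const footer: pulumi.Output<string> = pulumi.output('EOT\n# import f.sql into Cloud SQL here\n');

// pulumi.interpolate accepts Inputs, including other Outputs, so a
// plain SQL body can be bracketed between the two shell fragments.
function bracketed(sql: string): pulumi.Output<string> {
  return pulumi.interpolate`${header}${sql}${footer}`;
}

// The quoted heredoc delimiter ('EOT') keeps the shell from expanding
// any $ in the SQL body when the composed script eventually runs.
export const deleteScript = bracketed('DROP PUBLICATION IF EXISTS demo_pub;');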

cluster/pulumi/common-sv/src/svConfigs.ts

Lines changed: 6 additions & 0 deletions
@@ -15,6 +15,8 @@ import { StaticSvConfig } from './config';
 import { dsoSize } from './dsoConfig';
 import { cometbftRetainBlocks } from './synchronizer/cometbftConfig';
 
+const sv1ScanBigQuery = spliceEnvConfig.envFlag('SV1_SCAN_BIGQUERY', false);
+
 const svCometBftSecrets: pulumi.Output<SvCometBftKeys>[] = isMainNet
   ? [svCometBftKeysFromSecret('sv1-cometbft-keys')]
   : [
@@ -58,6 +60,9 @@ export const svConfigs: StaticSvConfig[] = isMainNet
         },
       },
       sweep: sweepConfigFromEnv('SV1'),
+      ...(sv1ScanBigQuery
+        ? { scanBigQuery: { dataset: 'mainnet_da2_scan', prefix: 'da2' } }
+        : {}),
     },
   ]
   : [
@@ -83,6 +88,7 @@ export const svConfigs: StaticSvConfig[] = isMainNet
         },
       },
       sweep: sweepConfigFromEnv('SV1'),
+      ...(sv1ScanBigQuery ? { scanBigQuery: { dataset: 'devnet_da2_scan', prefix: 'da2' } } : {}),
     },
     {
       // TODO(#12169): consider making nodeName and ingressName the same (also for all other SVs)
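The conditional spread added above is the standard TypeScript idiom for flag-gated config keys: spreading {} when SV1_SCAN_BIGQUERY is off leaves scanBigQuery absent from the object entirely, rather than present with an undefined value. A self-contained sketch (the flag and field names below are illustrative):

// Read a boolean flag from the environment, defaulting to false;
// roughly what an envFlag-style helper does.
const enabled = process.env.MY_FLAG === 'true';

const config = {
  sweep: 'always-present',
  // Spread an empty object when disabled, so the key is truly absent.
  ...(enabled ? { scanBigQuery: { dataset: 'demo_scan', prefix: 'demo' } } : {}),
};

// Prints false when the flag is off; with
// `scanBigQuery: enabled ? { ... } : undefined` it would print true.
console.log('scanBigQuery' in config);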
