diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala
index a335a8416f..d27a56a0fd 100644
--- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala
+++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/RecoverExternalPartyIntegrationTest.scala
@@ -170,7 +170,7 @@ class RecoverExternalPartyIntegrationTest
       }
       val acsSnapshotFile = Files.createTempFile("acs", ".snapshot")
       Files.write(acsSnapshotFile, acsSnapshot.toByteArray())
-      bobValidatorBackend.participantClient.repair.import_acs_old(acsSnapshotFile.toString)
+      bobValidatorBackend.participantClient.repair.import_acs(acsSnapshotFile.toString)
       bobValidatorBackend.participantClient.synchronizers.reconnect_all()
     }

diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala
index 3448b5600c..0e2b37e072 100644
--- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala
+++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala
@@ -18,9 +18,14 @@ import com.digitalasset.canton.admin.api.client.data.{
   ParticipantStatus,
   PruningSchedule,
 }
-import com.digitalasset.canton.admin.participant.v30.{ExportAcsOldResponse, PruningServiceGrpc}
+import com.digitalasset.canton.participant.admin.data.ContractIdImportMode
+import com.digitalasset.canton.admin.participant.v30.{
+  ExportAcsResponse,
+  ExportAcsAtTimestampResponse,
+  PruningServiceGrpc,
+}
 import com.digitalasset.canton.admin.participant.v30.PruningServiceGrpc.PruningServiceStub
-import com.digitalasset.canton.config.RequireTypes.PositiveInt
+import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt}
 import com.digitalasset.canton.config.{ApiLoggingConfig, ClientConfig, PositiveDurationSeconds}
 import com.digitalasset.canton.discard.Implicits.DiscardOps
 import com.digitalasset.canton.logging.NamedLoggerFactory
@@ -283,27 +288,79 @@ class ParticipantAdminConnection(
     )
   }

-  def downloadAcsSnapshot(
+  def downloadAcsSnapshotForPartyMigration(
       parties: Set[PartyId],
-      filterSynchronizerId: Option[SynchronizerId] = None,
-      timestamp: Option[Instant] = None,
-      force: Boolean = false,
+      filterSynchronizerId: SynchronizerId,
+      timestamp: Instant,
   )(implicit traceContext: TraceContext): Future[ByteString] = {
     logger.debug(
       show"Downloading ACS snapshot from domain $filterSynchronizerId, for parties $parties at timestamp $timestamp"
     )
     val requestComplete = Promise[ByteString]()
     // TODO(DACH-NY/canton-network-node#3298) just concatenate the byteString here. Make it scale to 2M contracts.
-    val observer = new GrpcByteChunksToByteArrayObserver[ExportAcsOldResponse](requestComplete)
+    val observer =
+      new GrpcByteChunksToByteArrayObserver[ExportAcsAtTimestampResponse](requestComplete)
     runCmd(
-      ParticipantAdminCommands.ParticipantRepairManagement.ExportAcsOld(
+      ParticipantAdminCommands.PartyManagement.ExportAcsAtTimestamp(
         parties = parties,
-        partiesOffboarding = false,
         filterSynchronizerId,
         timestamp,
         observer,
-        Map.empty,
-        force,
+      )
+    ).discard
+    requestComplete.future
+  }
+
+  def downloadAcsSnapshotForSynchronizerMigration(
+      parties: Set[PartyId],
+      synchronizerId: SynchronizerId,
+      timestamp: Instant,
+      disasterRecovery: Boolean,
+  )(implicit traceContext: TraceContext): Future[ByteString] = {
+    // ExportAcsAtTimestamp only works if the timestamp corresponds to a PartyToParticipant change, so the
+    // offset-based export below is required for synchronizer migrations and disaster recovery. Without the
+    // force flag, GetHighestOffsetByTimestamp fails until the participant has processed a timestamp >= the
+    // requested timestamp. On migrations this is guaranteed to happen for all nodes due to time proofs. On
+    // disaster recovery we cannot guarantee this, so we use force=true, which does not wait until a
+    // timestamp >= the requested timestamp has been processed.
+    getHighestOffsetByTimestamp(synchronizerId, timestamp, force = disasterRecovery).flatMap {
+      offset =>
+        downloadAcsSnapshotAtOffset(parties, synchronizerId, offset.unwrap)
+    }
+  }
+
+  def getHighestOffsetByTimestamp(
+      synchronizerId: SynchronizerId,
+      timestamp: Instant,
+      force: Boolean,
+  )(implicit tc: TraceContext): Future[NonNegativeLong] = {
+    runCmd(
+      ParticipantAdminCommands.PartyManagement
+        .GetHighestOffsetByTimestamp(synchronizerId, timestamp, force)
+    ).map { offset =>
+      logger.debug(s"Translated $timestamp on $synchronizerId to $offset with force=$force")
+      offset
+    }
+  }
+
+  def downloadAcsSnapshotAtOffset(
+      parties: Set[PartyId],
+      filterSynchronizerId: SynchronizerId,
+      offset: Long,
+  )(implicit traceContext: TraceContext): Future[ByteString] = {
+    logger.debug(
+      show"Downloading ACS snapshot from domain $filterSynchronizerId, for parties $parties at offset $offset"
+    )
+    val requestComplete = Promise[ByteString]()
+    // TODO(#3298) just concatenate the byteString here. Make it scale to 2M contracts.
+    val observer =
+      new GrpcByteChunksToByteArrayObserver[ExportAcsResponse](requestComplete)
+    runCmd(
+      ParticipantAdminCommands.PartyManagement.ExportAcs(
+        parties = parties,
+        Some(filterSynchronizerId),
+        offset,
+        observer,
+        contractSynchronizerRenames = Map.empty,
       )
     ).discard
     requestComplete.future
@@ -317,10 +374,10 @@ class ParticipantAdminConnection(
       "Imports the acs in the participantl",
       runCmd(
         ParticipantAdminCommands.ParticipantRepairManagement
-          .ImportAcsOld(
+          .ImportAcs(
             acsBytes,
-            IMPORT_ACS_WORKFLOW_ID_PREFIX,
-            allowContractIdSuffixRecomputation = false,
+            workflowIdPrefix = IMPORT_ACS_WORKFLOW_ID_PREFIX,
+            contractIdImportMode = ContractIdImportMode.Validation,
           )
       ).map(_ => ()),
       logger,
diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/AcsExporter.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/AcsExporter.scala
index 2ee45a34ba..603e4a4ef2 100644
--- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/AcsExporter.scala
+++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/migration/AcsExporter.scala
@@ -39,16 +39,16 @@ class AcsExporter(
   def exportAcsAtTimestamp(
       domain: SynchronizerId,
       timestamp: Instant,
-      force: Boolean,
+      disasterRecovery: Boolean,
       parties: PartyId*
   )(implicit
       tc: TraceContext
   ): Future[ByteString] = {
-    participantAdminConnection.downloadAcsSnapshot(
+    participantAdminConnection.downloadAcsSnapshotForSynchronizerMigration(
       parties = parties.toSet,
-      filterSynchronizerId = Some(domain),
-      timestamp = Some(timestamp),
-      force = force,
+      synchronizerId = domain,
+      timestamp = timestamp,
+      disasterRecovery = disasterRecovery,
     )
   }

@@ -90,11 +90,11 @@ class AcsExporter(
       )
       acsSnapshotTimestamp = domainParamsStateTopology.base.validFrom
       snapshot <- EitherT.liftF[Future, AcsExportFailure, ByteString](
-        participantAdminConnection.downloadAcsSnapshot(
+        participantAdminConnection.downloadAcsSnapshotForSynchronizerMigration(
           parties = parties.toSet,
-          filterSynchronizerId = Some(domain),
-          timestamp = Some(acsSnapshotTimestamp),
-          force = true,
+          synchronizerId = domain,
+          timestamp = acsSnapshotTimestamp,
+          disasterRecovery = false,
         )
       )
     } yield {
diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala
index b6de02143e..f367b32d4b 100644
--- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala
+++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala
@@ -249,7 +249,7 @@ class ScanApp(
           config.spliceInstanceNames,
           participantAdminConnection,
           sequencerAdminConnection,
-          store,
+          automation,
           acsSnapshotStore,
           dsoAnsResolver,
           config.miningRoundsCacheTimeToLiveOverride,
diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala
index cbc8440474..9fb7cd4845 100644
--- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala
+++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala
@@ -44,6 +44,7 @@ import org.lfdecentralizedtrust.splice.http.v0.definitions.{
 import org.lfdecentralizedtrust.splice.http.v0.scan.ScanResource
 import org.lfdecentralizedtrust.splice.http.v0.{definitions, scan as v0}
 import org.lfdecentralizedtrust.splice.scan.store.{AcsSnapshotStore, ScanStore, TxLogEntry}
+import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion
 import org.lfdecentralizedtrust.splice.util.{
   Codec,
   Contract,
@@ -53,9 +54,10 @@ import org.lfdecentralizedtrust.splice.util.{
 }
 import org.lfdecentralizedtrust.splice.util.PrettyInstances.*
 import com.digitalasset.canton.logging.NamedLoggerFactory
-import com.digitalasset.canton.participant.admin.data.ActiveContractOld as ActiveContract
+import com.digitalasset.canton.participant.admin.data.ActiveContract
 import com.digitalasset.canton.topology.{Member, PartyId, SynchronizerId}
 import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.util.{ByteStringUtil, GrpcStreamingUtils, ResourceUtil}
 import com.digitalasset.canton.util.ShowUtil.*
 import com.google.protobuf.ByteString
 import io.grpc.Status
@@ -65,6 +67,7 @@ import scala.concurrent.{ExecutionContextExecutor, Future}
 import scala.jdk.CollectionConverters.*
 import scala.jdk.OptionConverters.*
 import scala.util.{Try, Using}
+import java.io.ByteArrayInputStream
 import java.util.Base64
 import java.util.zip.GZIPOutputStream
 import java.time.{Instant, OffsetDateTime, ZoneOffset}
@@ -98,7 +101,7 @@ class HttpScanHandler(
     spliceInstanceNames: SpliceInstanceNamesConfig,
     participantAdminConnection: ParticipantAdminConnection,
     sequencerAdminConnection: SequencerAdminConnection,
-    protected val store: ScanStore,
+    protected val storeWithIngestion: AppStoreWithIngestion[ScanStore],
     snapshotStore: AcsSnapshotStore,
     dsoAnsResolver: DsoAnsResolver,
     miningRoundsCacheTimeToLiveOverride: Option[NonNegativeFiniteDuration],
@@ -115,6 +118,7 @@ class HttpScanHandler(
     with HttpValidatorLicensesHandler
     with HttpFeatureSupportHandler {

+  private val store = storeWithIngestion.store
   override protected val workflowId: String = this.getClass.getSimpleName
   override protected val votesStore: VotesStore = store
   override protected val validatorLicensesStore: AppStore = store
@@ -1147,17 +1151,37 @@ class HttpScanHandler(
   /** Filter the given ACS snapshot to contracts the given party is a stakeholder on */
   // TODO(#828) Move this logic inside a Canton gRPC API.
   private def filterAcsSnapshot(input: ByteString, stakeholder: PartyId): ByteString = {
-    val contracts = ActiveContract
-      .loadFromByteString(input)
-      .valueOr(error =>
-        throw Status.INTERNAL
-          .withDescription(s"Failed to read ACS snapshot: ${error}")
-          .asRuntimeException()
-      )
+    val decompressedBytes =
+      ByteStringUtil
+        .decompressGzip(input, None)
+        .valueOr(err =>
+          throw Status.INVALID_ARGUMENT
+            .withDescription(s"Failed to decompress bytes: $err")
+            .asRuntimeException
+        )
+    val contracts = ResourceUtil.withResource(
+      new ByteArrayInputStream(decompressedBytes.toByteArray)
+    ) { inputSource =>
+      GrpcStreamingUtils
+        .parseDelimitedFromTrusted[ActiveContract](
+          inputSource,
+          ActiveContract,
+        )
+        .valueOr(err =>
+          throw Status.INVALID_ARGUMENT
+            .withDescription(s"Failed to parse contracts in acs snapshot: $err")
+            .asRuntimeException
+        )
+    }
     val output = ByteString.newOutput
     Using.resource(new GZIPOutputStream(output)) { outputStream =>
-      contracts.filter(c => c.contract.metadata.stakeholders.contains(stakeholder.toLf)).foreach {
-        c =>
+      contracts
+        .filter(c =>
+          c.contract.getCreatedEvent.signatories.contains(
+            stakeholder.toLf
+          ) || c.contract.getCreatedEvent.observers.contains(stakeholder.toLf)
+        )
+        .foreach { c =>
           c.writeDelimitedTo(outputStream) match {
             case Left(error) =>
               throw Status.INTERNAL
                 .asRuntimeException()
             case Right(_) => outputStream.flush()
           }
-      }
+        }
     }
     output.toByteString
   }
@@ -1180,6 +1204,20 @@ class HttpScanHandler(
     withSpan(s"$workflowId.getAcsSnapshot") { _ => _ =>
       val partyId = PartyId.tryFromProtoPrimitive(party)
       for {
+        synchronizerId <- store
+          .lookupAmuletRules()
+          .map(
+            _.getOrElse(
+              throw io.grpc.Status.FAILED_PRECONDITION
+                .withDescription("No amulet rules.")
+                .asRuntimeException()
+            ).state.fold(
+              identity,
+              throw io.grpc.Status.FAILED_PRECONDITION
+                .withDescription("Amulet rules are in flight.")
+                .asRuntimeException(),
+            )
+          )
         // The DSO party is a stakeholder on all "important" contracts, in particular, all amulet holdings and ANS entries.
         // This means the SV participants ingest data for that party and we can take a snapshot for that party.
         // To make sure the snapshot is the same regardless of which SV is queried, we filter it down to
         // that users backup their own ACS.
         // As the DSO party is hosted on all SVs, an arbitrary scan instance can be chosen for the ACS snapshot.
         // BFT reads are usually not required since ACS commitments act as a check that the ACS was correct.
-        acsSnapshot <- participantAdminConnection.downloadAcsSnapshot(
-          Set(partyId),
-          timestamp = recordTime.map(_.toInstant),
-        )
+        acsSnapshot <- recordTime match {
+          case None =>
+            storeWithIngestion.connection.ledgerEnd().flatMap { offset =>
+              participantAdminConnection.downloadAcsSnapshotAtOffset(
+                Set(partyId),
+                offset = offset,
+                filterSynchronizerId = synchronizerId,
+              )
+            }
+          case Some(time) =>
+            // To support arbitrary timestamps we use forSynchronizerMigration instead of forPartyMigration
+            participantAdminConnection.downloadAcsSnapshotForSynchronizerMigration(
+              Set(partyId),
+              timestamp = time.toInstant,
+              synchronizerId = synchronizerId,
+              disasterRecovery = false,
+            )
+        }
       } yield {
         val filteredAcsSnapshot = filterAcsSnapshot(acsSnapshot, store.key.dsoParty)
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala
index 07132197a7..189c0f9608 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala
@@ -518,7 +518,6 @@ class SvApp(
             localSynchronizerNode,
             retryProvider,
             new DsoPartyMigration(
-              svAutomation,
               dsoAutomation,
               participantAdminConnection,
               retryProvider,
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala
index 028bed56b6..f8290f1f90 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala
@@ -560,7 +560,7 @@ class HttpSvAdminHandler(
         .getDomainDataSnapshot(
           Instant.parse(timestamp),
           partyId.map(Codec.tryDecode(Codec.Party)(_)),
-          force.getOrElse(false),
+          disasterRecovery = true,
         )
         .map { response =>
           val responseHttp = response.toHttp
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/migration/DomainDataSnapshotGenerator.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/migration/DomainDataSnapshotGenerator.scala
index 0b261ba1af..e26e53798c 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/migration/DomainDataSnapshotGenerator.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/migration/DomainDataSnapshotGenerator.scala
@@ -45,7 +45,7 @@ class DomainDataSnapshotGenerator(
   def getDomainDataSnapshot(
       timestamp: Instant,
       partyId: Option[PartyId],
-      force: Boolean,
+      disasterRecovery: Boolean,
   )(implicit
       ec: ExecutionContext,
       tc: TraceContext,
@@ -57,7 +57,7 @@ class DomainDataSnapshotGenerator(
         .exportAcsAtTimestamp(
           decentralizedSynchronizer,
           timestamp,
-          force,
+          disasterRecovery,
           partyId.fold(Seq(dsoStore.key.dsoParty, dsoStore.key.svParty))(Seq(_))*
         )
       dars <- darExporter.exportAllDars()
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sponsor/DsoPartyMigration.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sponsor/DsoPartyMigration.scala
index a739b7430b..2d1c2d7028 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sponsor/DsoPartyMigration.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sponsor/DsoPartyMigration.scala
@@ -4,9 +4,6 @@ package org.lfdecentralizedtrust.splice.sv.onboarding.sponsor

 import cats.data.EitherT
-import cats.syntax.foldable.*
-import com.digitalasset.base.error.utils.ErrorDetails
-import org.lfdecentralizedtrust.splice.codegen.java.splice.amulet.FeaturedAppRight
 import org.lfdecentralizedtrust.splice.environment.{
   ParticipantAdminConnection,
   RetryFor,
@@ -15,20 +12,17 @@ import org.lfdecentralizedtrust.splice.environment.{
 import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion
 import org.lfdecentralizedtrust.splice.sv.onboarding.DsoPartyHosting
 import org.lfdecentralizedtrust.splice.sv.onboarding.DsoPartyHosting.DsoPartyMigrationFailure
-import org.lfdecentralizedtrust.splice.sv.store.{SvSvStore, SvDsoStore}
+import org.lfdecentralizedtrust.splice.sv.store.SvDsoStore
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
-import com.digitalasset.canton.participant.admin.repair.RepairServiceError
 import com.digitalasset.canton.topology.{SynchronizerId, ParticipantId}
 import com.digitalasset.canton.tracing.TraceContext
 import com.digitalasset.canton.util.ShowUtil.*
 import com.google.protobuf.ByteString
-import io.grpc.{Status, StatusRuntimeException}

 import java.time.Instant
 import scala.concurrent.{ExecutionContextExecutor, Future}

 class DsoPartyMigration(
-    svStoreWithIngestion: AppStoreWithIngestion[SvSvStore],
     dsoStoreWithIngestion: AppStoreWithIngestion[SvDsoStore],
     participantAdminConnection: ParticipantAdminConnection,
     retryProvider: RetryProvider,
@@ -39,7 +33,6 @@ class DsoPartyMigration(
 ) extends NamedLogging {

   private val dsoStore = dsoStoreWithIngestion.store
-  private val svParty = dsoStore.key.svParty
   private val dsoParty = dsoStore.key.dsoParty
   private val partyHosting = new SponsorDsoPartyHosting(
     participantAdminConnection,
@@ -74,24 +67,6 @@ class DsoPartyMigration(
       authorizedAt: Instant,
       decentralizedSynchronizer: SynchronizerId,
   )(implicit tc: TraceContext): Future[ByteString] = {
-    def submitDummyTransaction(): Future[Unit] =
-      svStoreWithIngestion.connection
-        .submit(
-          Seq(svParty),
-          Seq.empty,
-          // The transaction here is arbitrary with the restriction that it should not have the DSO as a stakeholder.
-          // FeaturedAppRight just happens to be one of the simplest templates we have.
-          new FeaturedAppRight(svParty.toProtoPrimitive, svParty.toProtoPrimitive).createAnd
-            .exerciseArchive(),
-        )
-        .withSynchronizerId(decentralizedSynchronizer)
-        .noDedup
-        .yieldUnit()
-    // Acquiring the ACS snapshot is tricky due to two issues:
-    // 1. The snapshot can only be acquired at a "clean" timestamp which means there are no outstanding ACS commitments.
-    //    To ensure that the timestamp will eventually be clean we need to submit a transaction visible to the participant (submitDummyTransaction) and
-    //    retry the download afterwards. Note that due to the second issue, this transaction must not change contracts with DSO as the stakeholder.
-    // 2. Concurrent ACS pruning in Canton can prune the data for that timestamp. In that case, we give up.
     for {
       snapshot <- {
         retryProvider.retry(
           "download_acs_snapshot",
           show"Download ACS snapshot for DSO at $authorizedAt",
           participantAdminConnection
-            .downloadAcsSnapshot(
+            .downloadAcsSnapshotForPartyMigration(
               Set(dsoParty),
-              filterSynchronizerId = Some(decentralizedSynchronizer),
-              timestamp = Some(authorizedAt),
-            )
-            .recoverWith { case ex: StatusRuntimeException =>
-              val errorDetails = ErrorDetails.from(ex: StatusRuntimeException)
-              for {
-                // Special case some exceptions
-                _ <- errorDetails.traverse_ {
-                  case ErrorDetails
-                        .ErrorInfoDetail(RepairServiceError.UnavailableAcsSnapshot.id, metadata) =>
-                    val msg =
-                      s"Requested record time $authorizedAt has been pruned: $metadata, make sure that journal-garbage-collection-delay is configured sufficiently high"
-                    logger.warn(msg)
-                    Future.failed(Status.INVALID_ARGUMENT.withDescription(msg).asRuntimeException())
-                  case ErrorDetails.ErrorInfoDetail(
-                        RepairServiceError.InvalidAcsSnapshotTimestamp.id,
-                        metadata,
-                      ) =>
-                    logger.info(
-                      s"Requested record time $authorizedAt is not yet clean: $metadata, submitting dummy transaction"
-                    )
-                    submitDummyTransaction()
-                  case _ => Future.unit
-                }
-              } yield {
-                // Rethrow everything else
-                throw ex
-              }
-            },
+              filterSynchronizerId = decentralizedSynchronizer,
+              timestamp = authorizedAt,
+            ),
           logger,
         )
       }
diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/admin/http/HttpValidatorAdminHandler.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/admin/http/HttpValidatorAdminHandler.scala
index 3c8ea6f9b7..84280e4e6f 100644
--- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/admin/http/HttpValidatorAdminHandler.scala
+++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/admin/http/HttpValidatorAdminHandler.scala
@@ -176,7 +176,7 @@ class HttpValidatorAdminHandler(
           synchronizerId,
           // TODO(DACH-NY/canton-network-node#9731): get migration id from scan instead of configuring here
           migrationId getOrElse (config.domainMigrationId + 1),
-          force.getOrElse(false),
+          disasterRecovery = true,
         )
         .map { response =>
          v0.ValidatorAdminResource.GetValidatorDomainDataSnapshotResponse.OK(
diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDumpGenerator.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDumpGenerator.scala
index 0532f590e9..487b255a4b 100644
--- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDumpGenerator.scala
+++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDumpGenerator.scala
@@ -79,7 +79,7 @@ class DomainMigrationDumpGenerator(
       timestamp: Instant,
       domain: SynchronizerId,
       migrationId: Long,
-      force: Boolean,
+      disasterRecovery: Boolean,
   )(implicit
       ec: ExecutionContext,
       tc: TraceContext,
@@ -97,7 +97,7 @@ class DomainMigrationDumpGenerator(
       acsSnapshot <- acsExporter.exportAcsAtTimestamp(
         domain,
         timestamp,
-        force,
+        disasterRecovery,
         parties*
       )
       dars <- darExporter.exportAllDars()
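
Editor's note: the diff splits the old downloadAcsSnapshot into separate entry points. The sketch below illustrates which one applies where; the wrapper class AcsSnapshotExamples and its method names are hypothetical, while the ParticipantAdminConnection methods and parameters are the ones introduced above.

import java.time.Instant

import com.digitalasset.canton.topology.{PartyId, SynchronizerId}
import com.digitalasset.canton.tracing.TraceContext
import com.google.protobuf.ByteString
import org.lfdecentralizedtrust.splice.environment.ParticipantAdminConnection

import scala.concurrent.Future

// Hypothetical caller showing how the two new export entry points are intended to be used.
final class AcsSnapshotExamples(connection: ParticipantAdminConnection)(implicit
    tc: TraceContext
) {

  // Party onboarding: the requested timestamp is the effective time of a PartyToParticipant
  // topology change, so the timestamp-based export can be used directly.
  def forPartyOnboarding(
      party: PartyId,
      synchronizer: SynchronizerId,
      authorizedAt: Instant,
  ): Future[ByteString] =
    connection.downloadAcsSnapshotForPartyMigration(
      Set(party),
      filterSynchronizerId = synchronizer,
      timestamp = authorizedAt,
    )

  // Synchronizer migration / disaster recovery: the timestamp is first translated to a ledger
  // offset via GetHighestOffsetByTimestamp; disasterRecovery = true maps to force = true there,
  // i.e. the lookup does not wait for the participant to observe the requested timestamp.
  def forMigration(
      parties: Set[PartyId],
      synchronizer: SynchronizerId,
      timestamp: Instant,
      disasterRecovery: Boolean,
  ): Future[ByteString] =
    connection.downloadAcsSnapshotForSynchronizerMigration(
      parties,
      synchronizerId = synchronizer,
      timestamp = timestamp,
      disasterRecovery = disasterRecovery,
    )
}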
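Editor's note: with this change the scan getAcsSnapshot endpoint returns a gzipped stream of length-delimited ActiveContract messages, filtered to contracts the requested party signs or observes. The following is a minimal sketch of how a consumer might decode such a snapshot, reusing the same Canton utilities as filterAcsSnapshot; the object AcsSnapshotReader and its helpers are illustrative only and not part of this change.

import java.io.ByteArrayInputStream

import cats.syntax.either.*
import com.digitalasset.canton.participant.admin.data.ActiveContract
import com.digitalasset.canton.topology.PartyId
import com.digitalasset.canton.util.{ByteStringUtil, GrpcStreamingUtils, ResourceUtil}
import com.google.protobuf.ByteString

// Hypothetical reader for the gzipped, length-delimited ActiveContract stream.
object AcsSnapshotReader {

  def readContracts(snapshot: ByteString): Seq[ActiveContract] = {
    // Undo the GZIP compression applied by the scan handler.
    val decompressed = ByteStringUtil
      .decompressGzip(snapshot, None)
      .valueOr(err => throw new IllegalArgumentException(s"Not a gzipped snapshot: $err"))
    // Parse the length-delimited ActiveContract messages.
    ResourceUtil.withResource(new ByteArrayInputStream(decompressed.toByteArray)) { in =>
      GrpcStreamingUtils
        .parseDelimitedFromTrusted[ActiveContract](in, ActiveContract)
        .valueOr(err => throw new IllegalArgumentException(s"Malformed snapshot: $err"))
    }
  }

  // Split the contracts into those where `party` is a signatory and the rest, mirroring the
  // created-event signatories/observers criterion used by filterAcsSnapshot above.
  def partitionBySignatory(
      snapshot: ByteString,
      party: PartyId,
  ): (Seq[ActiveContract], Seq[ActiveContract]) =
    readContracts(snapshot).partition(
      _.contract.getCreatedEvent.signatories.contains(party.toLf)
    )
}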