From 7b8dc43bc690210bd298bbcc0c4cd98dd7b30a49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Mon, 13 Oct 2025 14:52:43 +0000 Subject: [PATCH 01/10] Disable UpdateHistory for SV and Splitwell apps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ci] Signed-off-by: Oriol Muñoz --- .../UpdateHistorySanityCheckPlugin.scala | 2 +- ...SynchronizerMigrationIntegrationTest.scala | 4 +- ...canHistoryBackfillingIntegrationTest.scala | 16 +-- .../tests/ScanTimeBasedIntegrationTest.scala | 2 +- ...anTotalSupplyBigQueryIntegrationTest.scala | 2 +- .../tests/UpdateHistoryIntegrationTest.scala | 20 +-- .../splice/util/UpdateHistoryTestUtil.scala | 2 +- .../splice/automation/AutomationService.scala | 2 +- .../SpliceAppAutomationService.scala | 14 --- .../automation/TxLogBackfillingTrigger.scala | 12 +- .../environment/NodeBootstrapBase.scala | 7 +- .../splice/store/AppStore.scala | 5 +- .../splice/store/HistoryMetrics.scala | 6 + .../splice/store/UpdateHistory.scala | 13 +- .../splice/store/db/DbAppStore.scala | 29 +---- .../store/TxLogBackfillingStoreTest.scala | 1 + .../splice/store/UpdateHistoryTestBase.scala | 5 +- .../splice/scan/ScanApp.scala | 26 +++- .../scan/admin/http/HttpScanHandler.scala | 17 ++- .../automation/ScanAutomationService.scala | 25 +++- .../ScanHistoryBackfillingTrigger.scala | 26 ++-- .../splice/scan/store/CachingScanStore.scala | 5 +- .../splice/scan/store/ScanStore.scala | 4 +- .../splice/scan/store/db/DbScanStore.scala | 11 +- .../scan/store/ScanEventStoreTest.scala | 10 +- .../store/db/AcsSnapshotStoreTest.scala | 5 +- .../splice/store/db/ScanAggregatorTest.scala | 1 - .../splice/store/db/ScanStoreTest.scala | 116 +++++++++--------- .../splice/splitwell/SplitwellApp.scala | 1 + .../SplitwellAutomationService.scala | 1 - .../splitwell/store/db/DbSplitwellStore.scala | 4 - .../automation/SvDsoAutomationService.scala | 5 +- .../sv/automation/SvSvAutomationService.scala | 3 +- 
.../singlesv/SequencerPruningTrigger.scala | 54 +++++++- .../splice/sv/store/db/DbSvDsoStore.scala | 4 - .../splice/sv/store/db/DbSvSvStore.scala | 4 - .../splice/validator/ValidatorApp.scala | 22 +++- .../ValidatorAutomationService.scala | 17 ++- .../validator/store/db/DbValidatorStore.scala | 4 - .../wallet/ExternalPartyWalletManager.scala | 4 +- .../wallet/ExternalPartyWalletService.scala | 22 +++- .../splice/wallet/UserWalletManager.scala | 4 +- .../splice/wallet/UserWalletService.scala | 22 +++- ...ExternalPartyWalletAutomationService.scala | 18 ++- .../UserWalletAutomationService.scala | 18 ++- .../store/db/DbExternalPartyWalletStore.scala | 13 +- .../wallet/store/db/DbUserWalletStore.scala | 6 +- 47 files changed, 362 insertions(+), 252 deletions(-) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala index 6ea45b394f..1fe316356d 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/plugins/UpdateHistorySanityCheckPlugin.scala @@ -247,7 +247,7 @@ class UpdateHistorySanityCheckPlugin( interval = Span(100, Millis), ) eventually { - scan.automation.store.updateHistory + scan.automation.updateHistory .getBackfillingState() .futureValue should be(BackfillingState.Complete) }( diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala index 99c979feba..3daa527e35 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala +++ 
b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DecentralizedSynchronizerMigrationIntegrationTest.scala @@ -1093,14 +1093,14 @@ class DecentralizedSynchronizerMigrationIntegrationTest withClueAndLog("Backfilled history includes ACS import") { eventually() { - sv1ScanLocalBackend.appState.store.updateHistory.sourceHistory + sv1ScanLocalBackend.appState.automation.updateHistory.sourceHistory .migrationInfo(1L) .futureValue .exists(_.complete) should be(true) } val backfilledUpdates = - sv1ScanLocalBackend.appState.store.updateHistory + sv1ScanLocalBackend.appState.automation.updateHistory .getAllUpdates(None, PageLimit.tryCreate(1000)) .futureValue backfilledUpdates.collect { diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala index 8ab26b8879..5a27d81490 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanHistoryBackfillingIntegrationTest.scala @@ -281,8 +281,8 @@ class ScanHistoryBackfillingIntegrationTest )( "History marked as free of corrupt snapshots", _ => { - sv1ScanBackend.appState.store.updateHistory.corruptAcsSnapshotsDeleted shouldBe true - sv2ScanBackend.appState.store.updateHistory.corruptAcsSnapshotsDeleted shouldBe true + sv1ScanBackend.appState.automation.updateHistory.corruptAcsSnapshotsDeleted shouldBe true + sv2ScanBackend.appState.automation.updateHistory.corruptAcsSnapshotsDeleted shouldBe true }, ) @@ -309,7 +309,7 @@ class ScanHistoryBackfillingIntegrationTest )( "Backfilling is complete only on the founding SV", _ => { - sv1ScanBackend.appState.store.updateHistory + sv1ScanBackend.appState.automation.updateHistory .getBackfillingState() .futureValue should 
be(BackfillingState.Complete) // Update history is complete at this point, but the status endpoint only reports @@ -317,7 +317,7 @@ class ScanHistoryBackfillingIntegrationTest sv1ScanBackend.getBackfillingStatus().complete shouldBe false readUpdateHistoryFromScan(sv1ScanBackend) should not be empty - sv2ScanBackend.appState.store.updateHistory + sv2ScanBackend.appState.automation.updateHistory .getBackfillingState() .futureValue should be(BackfillingState.InProgress(false, false)) sv2ScanBackend.getBackfillingStatus().complete shouldBe false @@ -356,12 +356,12 @@ class ScanHistoryBackfillingIntegrationTest )( "All backfilling is complete", _ => { - sv1ScanBackend.appState.store.updateHistory + sv1ScanBackend.appState.automation.updateHistory .getBackfillingState() .futureValue should be(BackfillingState.Complete) // Update history is complete, TxLog is not sv1ScanBackend.getBackfillingStatus().complete shouldBe false - sv2ScanBackend.appState.store.updateHistory + sv2ScanBackend.appState.automation.updateHistory .getBackfillingState() .futureValue should be(BackfillingState.Complete) // Update history is complete, TxLog is not @@ -446,7 +446,7 @@ class ScanHistoryBackfillingIntegrationTest clue("Compare scan history with participant update stream") { compareHistory( sv1Backend.participantClient, - sv1ScanBackend.appState.store.updateHistory, + sv1ScanBackend.appState.automation.updateHistory, ledgerBeginSv1, ) } @@ -556,7 +556,7 @@ class ScanHistoryBackfillingIntegrationTest private def allUpdatesFromScanBackend(scanBackend: ScanAppBackendReference) = { // Need to use the store directly, as the HTTP endpoint refuses to return data unless it's completely backfilled - scanBackend.appState.store.updateHistory + scanBackend.appState.automation.updateHistory .getAllUpdates(None, PageLimit.tryCreate(1000)) .futureValue } diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala 
b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala index bc24fa0eaf..a1f941042c 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTimeBasedIntegrationTest.scala @@ -434,7 +434,7 @@ class ScanTimeBasedIntegrationTest "Wait for backfilling to complete, as the ACS snapshot trigger is paused until then" ) { eventually() { - sv1ScanBackend.automation.store.updateHistory + sv1ScanBackend.automation.updateHistory .getBackfillingState() .futureValue should be(BackfillingState.Complete) advanceTime(sv1ScanBackend.config.automation.pollingInterval.asJava) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala index 9061725b1a..aebc716ab8 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ScanTotalSupplyBigQueryIntegrationTest.scala @@ -314,7 +314,7 @@ class ScanTotalSupplyBigQueryIntegrationTest case db: DbStorage => db case s => fail(s"non-DB storage configured, unsupported for BigQuery: ${s.getClass}") } - val sourceHistoryId = sv1ScanBackend.appState.store.updateHistory.historyId + val sourceHistoryId = sv1ScanBackend.appState.automation.updateHistory.historyId copyTableToBigQuery( "update_history_creates", diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/UpdateHistoryIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/UpdateHistoryIntegrationTest.scala index 90d7453559..361b62b935 100644 --- 
a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/UpdateHistoryIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/UpdateHistoryIntegrationTest.scala @@ -156,7 +156,7 @@ class UpdateHistoryIntegrationTest eventually() { compareHistory( sv1Backend.participantClient, - sv1ScanBackend.appState.store.updateHistory, + sv1ScanBackend.appState.automation.updateHistory, ledgerBeginSv1, ) } @@ -181,30 +181,16 @@ class UpdateHistoryIntegrationTest .lookupUserWallet(aliceWalletClient.config.ledgerApiUser) .futureValue .getOrElse(throw new RuntimeException("Alice wallet should exist")) - .store + .automation .updateHistory, ledgerBeginAlice, true, ) } - eventually() { - compareHistory( - sv1Backend.participantClient, - sv1Backend.appState.svStore.updateHistory, - ledgerBeginSv1, - ) - } - eventually() { - compareHistory( - sv1Backend.participantClient, - sv1Backend.appState.dsoStore.updateHistory, - ledgerBeginSv1, - ) - } eventually() { compareHistory( aliceValidatorBackend.participantClient, - aliceValidatorBackend.appState.store.updateHistory, + aliceValidatorBackend.appState.automation.updateHistory, ledgerBeginAlice, ) } diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala index cd58b41ad0..db150725f4 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/util/UpdateHistoryTestUtil.scala @@ -137,7 +137,7 @@ trait UpdateHistoryTestUtil extends TestCommon { scanBackend: ScanAppBackendReference, scanClient: ScanAppClientReference, ): Assertion = { - val historyFromStore = scanBackend.appState.store.updateHistory + val historyFromStore = scanBackend.appState.automation.updateHistory .getAllUpdates( None, PageLimit.tryCreate(1000), diff --git 
a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/AutomationService.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/AutomationService.scala index 031fba7100..83ad5d5c42 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/AutomationService.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/AutomationService.scala @@ -23,7 +23,7 @@ import scala.reflect.ClassTag /** Shared base class for running ingestion and task-handler automation in applications. */ abstract class AutomationService( - private val automationConfig: AutomationConfig, + protected val automationConfig: AutomationConfig, clock: Clock, domainTimeSync: DomainTimeSynchronization, domainUnpausedSync: DomainUnpausedSynchronization, diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala index a0b9decee6..0e5c6b5c95 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala @@ -37,7 +37,6 @@ abstract class SpliceAppAutomationService[Store <: AppStore]( ledgerClient: SpliceLedgerClient, retryProvider: RetryProvider, ingestFromParticipantBegin: Boolean, - ingestUpdateHistoryFromParticipantBegin: Boolean, parametersConfig: SpliceParametersConfig, )(implicit ec: ExecutionContext, @@ -113,19 +112,6 @@ abstract class SpliceAppAutomationService[Store <: AppStore]( ) ) - registerService( - new UpdateIngestionService( - store.updateHistory.getClass.getSimpleName, - store.updateHistory.ingestionSink, - connection(SpliceLedgerConnectionPriority.High), - automationConfig, - backoffClock = triggerContext.pollingClock, - triggerContext.retryProvider, - triggerContext.loggerFactory, - 
ingestUpdateHistoryFromParticipantBegin, - ) - ) - registerTrigger( new DomainIngestionService( store.domains.ingestionSink, diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/TxLogBackfillingTrigger.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/TxLogBackfillingTrigger.scala index a042e208c5..596370b8b6 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/TxLogBackfillingTrigger.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/TxLogBackfillingTrigger.scala @@ -10,6 +10,7 @@ import org.lfdecentralizedtrust.splice.store.{ HistoryMetrics, TxLogAppStore, TxLogBackfilling, + UpdateHistory, } import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.topology.PartyId @@ -23,6 +24,7 @@ import scala.concurrent.{ExecutionContext, Future} class TxLogBackfillingTrigger[TXE]( store: TxLogAppStore[TXE], + updateHistory: UpdateHistory, batchSize: Int, override protected val context: TriggerContext, )(implicit @@ -31,13 +33,13 @@ class TxLogBackfillingTrigger[TXE]( mat: Materializer, ) extends PollingParallelTaskExecutionTrigger[TxLogBackfillingTrigger.Task] { - private def party: PartyId = store.updateHistory.updateStreamParty + private def party: PartyId = updateHistory.updateStreamParty override protected def extraMetricLabels = Seq( "party" -> party.toProtoPrimitive ) - private val currentMigrationId = store.updateHistory.domainMigrationInfo.currentMigrationId + private val currentMigrationId = updateHistory.domainMigrationInfo.currentMigrationId private val historyMetrics = new HistoryMetrics(context.metricsFactory)( MetricsContext.Empty @@ -48,7 +50,7 @@ class TxLogBackfillingTrigger[TXE]( ) private val backfilling = new TxLogBackfilling( store.multiDomainAcsStore, - store.updateHistory, + updateHistory, batchSize, context.loggerFactory, ) @@ -56,7 +58,7 @@ class TxLogBackfillingTrigger[TXE]( override def 
retrieveTasks()(implicit tc: TraceContext ): Future[Seq[TxLogBackfillingTrigger.Task]] = { - if (!store.updateHistory.isReady) { + if (!updateHistory.isReady) { logger.debug("UpdateHistory is not yet ready") Future.successful(Seq.empty) } else if (!store.multiDomainAcsStore.destinationHistory.isReady) { @@ -64,7 +66,7 @@ class TxLogBackfillingTrigger[TXE]( Future.successful(Seq.empty) } else { for { - sourceState <- store.updateHistory.getBackfillingState() + sourceState <- updateHistory.getBackfillingState() destinationState <- store.multiDomainAcsStore.getTxLogBackfillingState() } yield { sourceState match { diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/NodeBootstrapBase.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/NodeBootstrapBase.scala index 624aac2b01..3332c5e561 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/NodeBootstrapBase.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/NodeBootstrapBase.scala @@ -14,7 +14,7 @@ import com.digitalasset.canton.crypto.Crypto import com.digitalasset.canton.environment.{CantonNode, CantonNodeBootstrap, CantonNodeParameters} import com.digitalasset.canton.lifecycle.{HasCloseContext, LifeCycle} import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.StorageFactory +import com.digitalasset.canton.resource.{DbStorage, StorageFactory} import com.digitalasset.canton.telemetry.ConfiguredOpenTelemetry import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.{NoTracing, TracerProvider} @@ -102,7 +102,10 @@ abstract class NodeBootstrapBase[ nodeMetrics.storageMetrics, parameterConfig.processingTimeouts, loggerFactory, - ) + ) match { + case storage: DbStorage => storage + case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") + } protected val httpAdminService: HttpAdminService = 
HttpAdminService( nodeConfig.nodeTypeName, diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/AppStore.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/AppStore.scala index 25ec39bd3a..959bb51931 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/AppStore.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/AppStore.scala @@ -12,17 +12,18 @@ import scala.concurrent.ExecutionContext */ trait AppStore extends NamedLogging with AutoCloseable with StoreErrors { + val storeName: String + implicit protected def ec: ExecutionContext /** Defines which create events are to be ingested into the store. */ - protected def acsContractFilter + def acsContractFilter : MultiDomainAcsStore.ContractFilter[_ <: AcsRowData, _ <: AcsInterfaceViewRowData] def domains: SynchronizerStore def multiDomainAcsStore: MultiDomainAcsStore - def updateHistory: UpdateHistory } trait TxLogAppStore[TXE] extends AppStore { diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/HistoryMetrics.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/HistoryMetrics.scala index f576f5a3da..3a1fe207ce 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/HistoryMetrics.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/HistoryMetrics.scala @@ -209,3 +209,9 @@ class HistoryMetrics(metricsFactory: LabeledMetricsFactory)(implicit ImportUpdatesBackfilling.completed.close() } } + +object HistoryMetrics { + def apply(metricsFactory: LabeledMetricsFactory, currentMigrationId: Long) = new HistoryMetrics( + metricsFactory + )(MetricsContext("current_migration_id" -> currentMigrationId.toString)) +} diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala index 824ec827a7..c44260490c 100644 --- 
a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala @@ -96,14 +96,15 @@ class UpdateHistory( override protected val loggerFactory: NamedLoggerFactory, enableissue12777Workaround: Boolean, enableImportUpdateBackfill: Boolean, - val oMetrics: Option[HistoryMetrics] = None, + metrics: HistoryMetrics, )(implicit ec: ExecutionContext, closeContext: CloseContext, ) extends HasIngestionSink with AcsJdbcTypes with AcsQueries - with NamedLogging { + with NamedLogging + with AutoCloseable { override lazy val profile: JdbcProfile = storage.api.jdbcProfile @@ -133,6 +134,8 @@ class UpdateHistory( def isReady: Boolean = state.get().historyId.isDefined + override def close(): Unit = metrics.close() + lazy val ingestionSink: MultiDomainAcsStore.IngestionSink = new MultiDomainAcsStore.IngestionSink { override def ingestionFilter: IngestionFilter = IngestionFilter( @@ -442,7 +445,7 @@ class UpdateHistory( val safeParticipantOffset = lengthLimited(LegacyOffset.Api.fromLong(reassignment.offset)) val safeUnassignId = lengthLimited(event.unassignId) val safeContractId = lengthLimited(event.contractId.contractId) - oMetrics.foreach(_.UpdateHistory.unassignments.mark()) + metrics.UpdateHistory.unassignments.mark() sqlu""" insert into update_history_unassignments( history_id,update_id,record_time, @@ -488,7 +491,7 @@ class UpdateHistory( val safeCreatedAt = CantonTimestamp.assertFromInstant(event.createdEvent.createdAt) val safeSignatories = event.createdEvent.getSignatories.asScala.toSeq.map(lengthLimited) val safeObservers = event.createdEvent.getObservers.asScala.toSeq.map(lengthLimited) - oMetrics.foreach(_.UpdateHistory.assignments.mark()) + metrics.UpdateHistory.assignments.mark() sqlu""" insert into update_history_assignments( history_id,update_id,record_time, @@ -518,7 +521,7 @@ class UpdateHistory( tree: TransactionTree, migrationId: Long, ): DBIOAction[?, 
NoStream, Effect.Read & Effect.Write] = { - oMetrics.foreach(_.UpdateHistory.transactionsTrees.mark()) + metrics.UpdateHistory.transactionsTrees.mark() insertTransactionUpdateRow(tree, migrationId).flatMap(updateRowId => { // Note: the order of elements in the eventsById map doesn't matter, and is not preserved here. // The order of elements in the rootEventIds and childEventIds lists DOES matter, and needs to be preserved. diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala index 3a6de14bac..de26656d8b 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala @@ -11,7 +11,6 @@ import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.ParticipantId -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import scala.concurrent.ExecutionContext @@ -24,10 +23,6 @@ abstract class DbTxLogAppStore[TXE]( txLogStoreDescriptor: DbMultiDomainAcsStore.StoreDescriptor, domainMigrationInfo: DomainMigrationInfo, participantId: ParticipantId, - enableissue12777Workaround: Boolean, - enableImportUpdateBackfill: Boolean, - backfillingRequired: BackfillingRequirement, - oHistoryMetrics: Option[HistoryMetrics] = None, )(implicit override protected val ec: ExecutionContext, templateJsonDecoder: TemplateJsonDecoder, @@ -39,10 +34,6 @@ abstract class DbTxLogAppStore[TXE]( acsStoreDescriptor = acsStoreDescriptor, domainMigrationInfo = domainMigrationInfo, participantId = participantId, - enableissue12777Workaround = enableissue12777Workaround, - enableImportUpdateBackfill = enableImportUpdateBackfill, - backfillingRequired, - oHistoryMetrics = 
oHistoryMetrics, ) with TxLogAppStore[TXE] { @@ -71,10 +62,6 @@ abstract class DbAppStore( acsStoreDescriptor: DbMultiDomainAcsStore.StoreDescriptor, domainMigrationInfo: DomainMigrationInfo, participantId: ParticipantId, - enableissue12777Workaround: Boolean, - enableImportUpdateBackfill: Boolean, - backfillingRequired: BackfillingRequirement, - oHistoryMetrics: Option[HistoryMetrics] = None, )(implicit protected val ec: ExecutionContext, templateJsonDecoder: TemplateJsonDecoder, @@ -103,6 +90,8 @@ abstract class DbAppStore( handleIngestionSummary, ) + override val storeName: String = multiDomainAcsStore.storeName + override lazy val domains: InMemorySynchronizerStore = new InMemorySynchronizerStore( acsContractFilter.ingestionFilter.primaryParty, @@ -110,20 +99,6 @@ abstract class DbAppStore( retryProvider, ) - override lazy val updateHistory: UpdateHistory = - new UpdateHistory( - storage, - domainMigrationInfo, - acsStoreDescriptor.name, - participantId, - acsContractFilter.ingestionFilter.primaryParty, - backfillingRequired, - loggerFactory, - enableissue12777Workaround, - enableImportUpdateBackfill, - oHistoryMetrics, - ) - override def close(): Unit = { multiDomainAcsStore.close() } diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/TxLogBackfillingStoreTest.scala b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/TxLogBackfillingStoreTest.scala index 532d8b06d4..fdeb8be802 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/TxLogBackfillingStoreTest.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/TxLogBackfillingStoreTest.scala @@ -559,6 +559,7 @@ class TxLogBackfillingStoreTest loggerFactory, enableissue12777Workaround = true, enableImportUpdateBackfill = true, + HistoryMetrics.apply(NoOpMetricsFactory, migrationId), ) } diff --git a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala 
b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala index 65f21ac5ec..617daf9b7b 100644 --- a/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala +++ b/apps/common/src/test/scala/org/lfdecentralizedtrust/splice/store/UpdateHistoryTestBase.scala @@ -9,7 +9,6 @@ import org.lfdecentralizedtrust.splice.environment.ledger.api.{ ReassignmentUpdate, TransactionTreeUpdate, } -import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo import org.lfdecentralizedtrust.splice.store.db.{AcsJdbcTypes, AcsTables, SplicePostgresTest} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.FutureUnlessShutdown @@ -26,8 +25,9 @@ import org.scalatest.Assertion import scala.concurrent.Future import scala.jdk.CollectionConverters.* - import UpdateHistory.UpdateHistoryResponse +import com.daml.metrics.api.noop.NoOpMetricsFactory +import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo abstract class UpdateHistoryTestBase extends StoreTest @@ -253,6 +253,7 @@ abstract class UpdateHistoryTestBase loggerFactory, enableissue12777Workaround = true, enableImportUpdateBackfill = true, + HistoryMetrics.apply(NoOpMetricsFactory, domainMigrationId), ) } diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala index 2dff6d3077..8e228bf782 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/ScanApp.scala @@ -46,14 +46,14 @@ import org.lfdecentralizedtrust.splice.scan.store.db.{ ScanAggregatesReaderContext, } import org.lfdecentralizedtrust.splice.scan.dso.DsoAnsResolver -import org.lfdecentralizedtrust.splice.store.PageLimit +import org.lfdecentralizedtrust.splice.store.{PageLimit, UpdateHistory} import 
org.lfdecentralizedtrust.splice.util.HasHealth import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.lifecycle.LifeCycle import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.{DbStorage, Storage} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} @@ -65,6 +65,7 @@ import org.apache.pekko.http.cors.scaladsl.settings.CorsSettings import scala.concurrent.{ExecutionContextExecutor, Future} import org.apache.pekko.stream.Materializer import org.lfdecentralizedtrust.splice.http.HttpRateLimiter +import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement /** Class representing a Scan app instance. * @@ -74,7 +75,7 @@ class ScanApp( override val name: InstanceName, val config: ScanAppBackendConfig, val amuletAppParameters: SharedSpliceAppParameters, - storage: Storage, + storage: DbStorage, override protected val clock: Clock, val loggerFactory: NamedLoggerFactory, tracerProvider: TracerProvider, @@ -181,13 +182,24 @@ class ScanApp( migrationInfo, participantId, config.cache, - config.updateHistoryBackfillImportUpdatesEnabled, nodeMetrics.dbScanStore, initialRound.toLong, ) + updateHistory = new UpdateHistory( + storage, + migrationInfo, + store.storeName, + participantId, + store.acsContractFilter.ingestionFilter.primaryParty, + BackfillingRequirement.NeedsBackfilling, + loggerFactory, + enableissue12777Workaround = true, + enableImportUpdateBackfill = config.updateHistoryBackfillImportUpdatesEnabled, + nodeMetrics.dbScanStore.history, + ) acsSnapshotStore = AcsSnapshotStore( storage, - store.updateHistory, + updateHistory, dsoParty, migrationInfo.currentMigrationId, 
loggerFactory, @@ -206,6 +218,7 @@ class ScanApp( retryProvider, loggerFactory, store, + updateHistory, storage, acsSnapshotStore, config.ingestFromParticipantBegin, @@ -218,7 +231,7 @@ class ScanApp( scanVerdictStore = DbScanVerdictStore(storage, loggerFactory)(ec) scanEventStore = new ScanEventStore( scanVerdictStore, - store.updateHistory, + updateHistory, loggerFactory, )(ec) _ <- appInitStep("Wait until there is an OpenMiningRound contract") { @@ -281,6 +294,7 @@ class ScanApp( participantAdminConnection, sequencerAdminConnection, store, + updateHistory, acsSnapshotStore, scanEventStore, dsoAnsResolver, diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala index cef5344e68..5b1531ae34 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/admin/http/HttpScanHandler.scala @@ -107,6 +107,7 @@ class HttpScanHandler( participantAdminConnection: ParticipantAdminConnection, sequencerAdminConnection: SequencerAdminConnection, protected val store: ScanStore, + updateHistory: UpdateHistory, snapshotStore: AcsSnapshotStore, eventStore: ScanEventStore, dsoAnsResolver: DsoAnsResolver, @@ -757,7 +758,6 @@ class HttpScanHandler( extracted: TraceContext, ): Future[Vector[definitions.UpdateHistoryItem]] = { implicit val tc: TraceContext = extracted - val updateHistory = store.updateHistory val afterO = after.map { after => val afterRecordTime = parseTimestamp(after.afterRecordTime) ( @@ -864,7 +864,7 @@ class HttpScanHandler( for { eventO <- eventStore.getEventByUpdateId( updateId, - store.updateHistory.domainMigrationInfo.currentMigrationId, + updateHistory.domainMigrationInfo.currentMigrationId, ) } yield { eventO match { @@ -906,7 +906,6 @@ class HttpScanHandler( extracted: TraceContext, ): 
Future[Vector[definitions.EventHistoryItem]] = { implicit val tc: TraceContext = extracted - val updateHistory = store.updateHistory val afterO = after.map { a => val afterRecordTime = parseTimestamp(a.afterRecordTime) (a.afterMigrationId, afterRecordTime) @@ -1258,7 +1257,7 @@ class HttpScanHandler( .sequentialTraverse(txLogEntryMap.view.toList) { case (cid, entry) => // The update history ingests independently so this lookup can return None temporarily. // We just filter out those contracts. - store.updateHistory + updateHistory .lookupContractById(TransferCommand.COMPANION)(cid) .map( _.map(c => @@ -1398,7 +1397,7 @@ class HttpScanHandler( .asRuntimeException(), ) ) - snapshotTime <- snapshotStore.updateHistory + snapshotTime <- updateHistory .getUpdatesBefore( snapshotStore.currentMigrationId, synchronizerId, @@ -1628,7 +1627,7 @@ class HttpScanHandler( ): Future[Either[definitions.ErrorResponse, definitions.UpdateHistoryItem]] = { implicit val tc = extracted for { - tx <- store.updateHistory.getUpdate(updateId) + tx <- updateHistory.getUpdate(updateId) } yield { tx.fold[Either[definitions.ErrorResponse, definitions.UpdateHistoryItem]]( Left( @@ -1964,7 +1963,7 @@ class HttpScanHandler( )(extracted: TraceContext): Future[ScanResource.GetMigrationInfoResponse] = { implicit val tc = extracted withSpan(s"$workflowId.getMigrationInfo") { _ => _ => - val sourceHistory = store.updateHistory.sourceHistory + val sourceHistory = updateHistory.sourceHistory for { infoO <- sourceHistory.migrationInfo(body.migrationId) } yield infoO match { @@ -1997,7 +1996,6 @@ class HttpScanHandler( )(extracted: TraceContext): Future[ScanResource.GetUpdatesBeforeResponse] = { implicit val tc: TraceContext = extracted withSpan(s"$workflowId.getUpdatesBefore") { _ => _ => - val updateHistory = store.updateHistory updateHistory .getUpdatesBefore( migrationId = body.migrationId, @@ -2028,7 +2026,6 @@ class HttpScanHandler( )(extracted: TraceContext): 
Future[ScanResource.GetImportUpdatesResponse] = { implicit val tc: TraceContext = extracted withSpan(s"$workflowId.getImportUpdates") { _ => _ => - val updateHistory = store.updateHistory updateHistory .getImportUpdates( migrationId = body.migrationId, @@ -2158,7 +2155,7 @@ class HttpScanHandler( implicit val tc = extracted withSpan(s"$workflowId.getBackfillingStatus") { _ => _ => for { - updateHistoryStatus <- store.updateHistory.getBackfillingState() + updateHistoryStatus <- updateHistory.getBackfillingState() txLogStatus <- store.multiDomainAcsStore.getTxLogBackfillingState() updateHistoryComplete = updateHistoryStatus == BackfillingState.Complete txLogComplete = txLogStatus == TxLogBackfillingState.Complete diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala index 8ada2ace18..548e250933 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala @@ -9,6 +9,7 @@ import org.lfdecentralizedtrust.splice.automation.{ SpliceAppAutomationService, SqlIndexInitializationTrigger, TxLogBackfillingTrigger, + UpdateIngestionService, } import org.lfdecentralizedtrust.splice.config.UpgradesConfig import org.lfdecentralizedtrust.splice.environment.{RetryProvider, SpliceLedgerClient} @@ -17,6 +18,7 @@ import org.lfdecentralizedtrust.splice.scan.config.ScanAppBackendConfig import org.lfdecentralizedtrust.splice.store.{ DomainTimeSynchronization, DomainUnpausedSynchronization, + UpdateHistory, } import org.lfdecentralizedtrust.splice.scan.store.{AcsSnapshotStore, ScanStore} import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder @@ -25,6 +27,7 @@ import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.time.Clock import 
com.digitalasset.canton.topology.PartyId import io.opentelemetry.api.trace.Tracer +import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import scala.concurrent.ExecutionContextExecutor @@ -36,6 +39,7 @@ class ScanAutomationService( retryProvider: RetryProvider, protected val loggerFactory: NamedLoggerFactory, store: ScanStore, + val updateHistory: UpdateHistory, storage: Storage, snapshotStore: AcsSnapshotStore, ingestFromParticipantBegin: Boolean, @@ -60,7 +64,6 @@ class ScanAutomationService( ledgerClient, retryProvider, ingestFromParticipantBegin, - ingestUpdateHistoryFromParticipantBegin, config.parameters, ) { override def companion @@ -71,10 +74,25 @@ class ScanAutomationService( registerTrigger( new ScanBackfillAggregatesTrigger(store, triggerContext, initialRound) ) + + registerService( + new UpdateIngestionService( + updateHistory.getClass.getSimpleName, + updateHistory.ingestionSink, + connection(SpliceLedgerConnectionPriority.High), + automationConfig, + backoffClock = triggerContext.pollingClock, + triggerContext.retryProvider, + triggerContext.loggerFactory, + ingestUpdateHistoryFromParticipantBegin, + ) + ) + if (config.updateHistoryBackfillEnabled) { registerTrigger( new ScanHistoryBackfillingTrigger( store, + updateHistory, svName, ledgerClient, config.updateHistoryBackfillBatchSize, @@ -88,7 +106,7 @@ class ScanAutomationService( registerTrigger( new AcsSnapshotTrigger( snapshotStore, - store.updateHistory, + updateHistory, config.acsSnapshotPeriodHours, // The acs snapshot trigger should not attempt to backfill snapshots unless the backfilling // UpdateHistory is fully enabled and complete. 
@@ -100,7 +118,7 @@ class ScanAutomationService( registerTrigger( new DeleteCorruptAcsSnapshotTrigger( snapshotStore, - store.updateHistory, + updateHistory, triggerContext, ) ) @@ -109,6 +127,7 @@ class ScanAutomationService( registerTrigger( new TxLogBackfillingTrigger( store, + updateHistory, config.txLogBackfillBatchSize, triggerContext, ) diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanHistoryBackfillingTrigger.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanHistoryBackfillingTrigger.scala index 0889357595..1f930a8f88 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanHistoryBackfillingTrigger.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanHistoryBackfillingTrigger.scala @@ -31,6 +31,7 @@ import org.lfdecentralizedtrust.splice.store.{ ImportUpdatesBackfilling, PageLimit, TreeUpdateWithMigrationId, + UpdateHistory, } import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder import com.digitalasset.canton.data.CantonTimestamp @@ -46,6 +47,7 @@ import scala.concurrent.{ExecutionContextExecutor, Future, blocking} class ScanHistoryBackfillingTrigger( store: ScanStore, + updateHistory: UpdateHistory, svName: String, ledgerClient: SpliceLedgerClient, batchSize: Int, @@ -61,7 +63,7 @@ class ScanHistoryBackfillingTrigger( mat: Materializer, ) extends PollingParallelTaskExecutionTrigger[ScanHistoryBackfillingTrigger.Task] { - private val currentMigrationId = store.updateHistory.domainMigrationInfo.currentMigrationId + private val currentMigrationId = updateHistory.domainMigrationInfo.currentMigrationId private val historyMetrics = new HistoryMetrics(context.metricsFactory)( MetricsContext( @@ -70,7 +72,7 @@ class ScanHistoryBackfillingTrigger( ) /** A cursor for iterating over the beginning of the update history in findHistoryStart, - * see 
[[org.lfdecentralizedtrust.splice.store.UpdateHistory.getUpdates()]]. + * see [[org.lfdecentralizedtrust.splice.updateHistory.getUpdates()]]. * We need to store this as we don't want to start over from the beginning every time the trigger runs. */ @SuppressWarnings(Array("org.wartremover.warts.Var")) @@ -88,14 +90,14 @@ class ScanHistoryBackfillingTrigger( override def retrieveTasks()(implicit tc: TraceContext ): Future[Seq[ScanHistoryBackfillingTrigger.Task]] = { - if (!store.updateHistory.isReady) { + if (!updateHistory.isReady) { logger.debug("UpdateHistory is not yet ready") Future.successful(Seq.empty) - } else if (importUpdateBackfillingEnabled && !store.updateHistory.corruptAcsSnapshotsDeleted) { + } else if (importUpdateBackfillingEnabled && !updateHistory.corruptAcsSnapshotsDeleted) { logger.debug("There may be corrupt ACS snapshots that need to be deleted") Future.successful(Seq.empty) } else { - store.updateHistory.getBackfillingState().map { + updateHistory.getBackfillingState().map { case BackfillingState.Complete => historyMetrics.UpdateHistoryBackfilling.completed.updateValue(1) historyMetrics.ImportUpdatesBackfilling.completed.updateValue(1) @@ -149,7 +151,7 @@ class ScanHistoryBackfillingTrigger( result <- initialUpdateO match { case Some(FoundingTransactionTreeUpdate(treeUpdate, _)) => for { - _ <- store.updateHistory + _ <- updateHistory .initializeBackfilling( treeUpdate.migrationId, treeUpdate.update.synchronizerId, @@ -163,8 +165,8 @@ class ScanHistoryBackfillingTrigger( for { // Before deleting updates, we need to delete ACS snapshots that were generated before backfilling was enabled. // This will delete all ACS snapshots for migration id where the SV node joined the network. 
- _ <- store.updateHistory.deleteAcsSnapshotsAfter( - historyId = store.updateHistory.historyId, + _ <- updateHistory.deleteAcsSnapshotsAfter( + historyId = updateHistory.historyId, migrationId = treeUpdate.migrationId, recordTime = CantonTimestamp.MinValue, ) @@ -172,12 +174,12 @@ class ScanHistoryBackfillingTrigger( // only with the visibility of the SV party and not the DSO party. // Note that this will also delete the import updates because they have a record time of 0, // which is good because we want to remove them. - _ <- store.updateHistory.deleteUpdatesBefore( + _ <- updateHistory.deleteUpdatesBefore( synchronizerId = treeUpdate.update.synchronizerId, migrationId = treeUpdate.migrationId, recordTime = treeUpdate.update.update.recordTime, ) - _ <- store.updateHistory + _ <- updateHistory .initializeBackfilling( treeUpdate.migrationId, treeUpdate.update.synchronizerId, @@ -203,7 +205,7 @@ class ScanHistoryBackfillingTrigger( synchronized { val batchSize = 100 for { - updates <- store.updateHistory.getUpdatesWithoutImportUpdates( + updates <- updateHistory.getUpdatesWithoutImportUpdates( findHistoryStartAfter, PageLimit.tryCreate(batchSize), ) @@ -260,7 +262,7 @@ class ScanHistoryBackfillingTrigger( val backfilling = new ScanHistoryBackfilling( connection = connection, - destinationHistory = store.updateHistory.destinationHistory, + destinationHistory = updateHistory.destinationHistory, currentMigrationId = currentMigrationId, batchSize = batchSize, loggerFactory = loggerFactory, diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/CachingScanStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/CachingScanStore.scala index 4a3f40ca22..6f538bd30b 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/CachingScanStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/CachingScanStore.scala @@ -58,6 +58,7 @@ class CachingScanStore( with 
FlagCloseableAsync with RetryProvider.Has { + override val storeName: String = store.storeName override lazy val txLogConfig: TxLogStore.Config[TxLogEntry] = store.txLogConfig override def key: ScanStore.Key = store.key @@ -306,12 +307,14 @@ class CachingScanStore( override def lookupContractByRecordTime[C, TCId <: ContractId[_], T]( companion: C, + updateHistory: UpdateHistory, recordTime: CantonTimestamp, )(implicit companionClass: MultiDomainAcsStore.ContractCompanion[C, TCId, T], tc: TraceContext, ): Future[Option[Contract[TCId, T]]] = store.lookupContractByRecordTime( companion, + updateHistory, recordTime, ) @@ -370,8 +373,6 @@ class CachingScanStore( override def multiDomainAcsStore: MultiDomainAcsStore = store.multiDomainAcsStore - override def updateHistory: UpdateHistory = store.updateHistory - @SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf")) private def getCache[Key, Value]( cacheName: String, diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala index 46634878ed..9788b37d47 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala @@ -38,6 +38,7 @@ import org.lfdecentralizedtrust.splice.store.{ PageLimit, SortOrder, TxLogAppStore, + UpdateHistory, VotesStore, } import org.lfdecentralizedtrust.splice.util.{Contract, ContractWithState, TemplateJsonDecoder} @@ -283,6 +284,7 @@ trait ScanStore def lookupContractByRecordTime[C, TCId <: ContractId[_], T]( companion: C, + updateHistory: UpdateHistory, recordTime: CantonTimestamp = CantonTimestamp.MinValue, )(implicit companionClass: ContractCompanion[C, TCId, T], @@ -311,7 +313,6 @@ object ScanStore { domainMigrationInfo: DomainMigrationInfo, participantId: ParticipantId, cacheConfigs: ScanCacheConfig, - enableImportUpdateBackfill: Boolean, 
metrics: DbScanStoreMetrics, initialRound: Long, )(implicit @@ -333,7 +334,6 @@ object ScanStore { createScanAggregatesReader, domainMigrationInfo, participantId, - enableImportUpdateBackfill, metrics, initialRound, ), diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala index eb15548904..6ebb14b465 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanStore.scala @@ -4,7 +4,7 @@ package org.lfdecentralizedtrust.splice.scan.store.db import com.daml.ledger.javaapi.data.codegen.ContractId -import com.digitalasset.canton.config.{NonNegativeDuration} +import com.digitalasset.canton.config.NonNegativeDuration import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{ AsyncCloseable, @@ -64,6 +64,7 @@ import org.lfdecentralizedtrust.splice.store.{ PageLimit, SortOrder, TxLogStore, + UpdateHistory, } import org.lfdecentralizedtrust.splice.util.{ Contract, @@ -74,7 +75,6 @@ import org.lfdecentralizedtrust.splice.util.{ } import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton import io.grpc.Status -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.lfdecentralizedtrust.splice.store.UpdateHistoryQueries.UpdateHistoryQueries import org.lfdecentralizedtrust.splice.store.db.AcsQueries.AcsStoreId import org.lfdecentralizedtrust.splice.store.db.TxLogQueries.TxLogStoreId @@ -96,7 +96,6 @@ class DbScanStore( createScanAggregatesReader: DbScanStore => ScanAggregatesReader, domainMigrationInfo: DomainMigrationInfo, participantId: ParticipantId, - enableImportUpdateBackfill: Boolean, storeMetrics: DbScanStoreMetrics, initialRound: Long, )(implicit @@ -130,10 +129,6 @@ class DbScanStore( ), 
domainMigrationInfo, participantId, - enableissue12777Workaround = true, - enableImportUpdateBackfill = enableImportUpdateBackfill, - BackfillingRequirement.NeedsBackfilling, - Some(storeMetrics.history), ) with ScanStore with AcsTables @@ -1077,8 +1072,10 @@ class DbScanStore( .toMap } + // TODO (#934): this method probably belongs in UpdateHistory instead override def lookupContractByRecordTime[C, TCId <: ContractId[_], T]( companion: C, + updateHistory: UpdateHistory, recordTime: CantonTimestamp, )(implicit companionClass: ContractCompanion[C, TCId, T], diff --git a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/scan/store/ScanEventStoreTest.scala b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/scan/store/ScanEventStoreTest.scala index 68a1599ac7..1355d9e5be 100644 --- a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/scan/store/ScanEventStoreTest.scala +++ b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/scan/store/ScanEventStoreTest.scala @@ -1,19 +1,18 @@ package org.lfdecentralizedtrust.splice.scan.store +import com.daml.metrics.api.noop.NoOpMetricsFactory import com.digitalasset.canton.HasExecutionContext import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} -import org.lfdecentralizedtrust.splice.store.PageLimit -import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo +import org.lfdecentralizedtrust.splice.store.{HistoryMetrics, PageLimit, StoreTest, UpdateHistory} import org.lfdecentralizedtrust.splice.scan.store.db.DbScanVerdictStore -import org.lfdecentralizedtrust.splice.store.UpdateHistory -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement -import org.lfdecentralizedtrust.splice.store.StoreTest import org.lfdecentralizedtrust.splice.store.db.SplicePostgresTest import com.digitalasset.canton.resource.DbStorage import 
com.digitalasset.canton.lifecycle.FutureUnlessShutdown import io.circe.Json +import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo +import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import scala.concurrent.Future @@ -755,6 +754,7 @@ class ScanEventStoreTest extends StoreTest with HasExecutionContext with SpliceP loggerFactory, enableissue12777Workaround = true, enableImportUpdateBackfill = true, + HistoryMetrics(NoOpMetricsFactory, migrationId), ) uh.ingestionSink.initialize().map(_ => uh) } diff --git a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/AcsSnapshotStoreTest.scala b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/AcsSnapshotStoreTest.scala index e378f84077..79c139c447 100644 --- a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/AcsSnapshotStoreTest.scala +++ b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/AcsSnapshotStoreTest.scala @@ -3,12 +3,13 @@ package org.lfdecentralizedtrust.splice.store.db import cats.data.NonEmptyVector import com.daml.ledger.javaapi.data.Unit as damlUnit import com.daml.ledger.javaapi.data.codegen.ContractId +import com.daml.metrics.api.noop.NoOpMetricsFactory import org.lfdecentralizedtrust.splice.environment.DarResources import org.lfdecentralizedtrust.splice.environment.ledger.api.TransactionTreeUpdate -import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo import org.lfdecentralizedtrust.splice.scan.store.AcsSnapshotStore import org.lfdecentralizedtrust.splice.store.{ HardLimit, + HistoryMetrics, PageLimit, StoreErrors, StoreTest, @@ -24,6 +25,7 @@ import com.digitalasset.canton.util.MonadUtil import com.digitalasset.canton.{HasActorSystem, HasExecutionContext} import io.grpc.StatusRuntimeException import org.lfdecentralizedtrust.splice.codegen.java.splice.round as roundCodegen +import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo import 
org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.scalatest.Succeeded @@ -1026,6 +1028,7 @@ class AcsSnapshotStoreTest loggerFactory, enableissue12777Workaround = true, enableImportUpdateBackfill = true, + HistoryMetrics(NoOpMetricsFactory, migrationId), ) updateHistory.ingestionSink.initialize().map(_ => updateHistory) } diff --git a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala index 7e5efd2c33..8036fe9df2 100644 --- a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala +++ b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanAggregatorTest.scala @@ -977,7 +977,6 @@ class ScanAggregatorTest None, ), participantId = mkParticipantId("ScanAggregatorTest"), - enableImportUpdateBackfill = true, new DbScanStoreMetrics(new NoOpMetricsFactory(), loggerFactory, ProcessingTimeout()), initialRound = initialRound, )(parallelExecutionContext, implicitly, implicitly) diff --git a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanStoreTest.scala b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanStoreTest.scala index 3928f17757..d7ce435408 100644 --- a/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanStoreTest.scala +++ b/apps/scan/src/test/scala/org/lfdecentralizedtrust/splice/store/db/ScanStoreTest.scala @@ -1,6 +1,17 @@ package org.lfdecentralizedtrust.splice.store.db import com.daml.ledger.javaapi.data.{DamlRecord, Unit as damlUnit} +import com.daml.metrics.api.noop.NoOpMetricsFactory +import com.digitalasset.canton.concurrent.FutureSupervisor +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.resource.DbStorage +import 
com.digitalasset.canton.topology.* +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.MonadUtil +import com.digitalasset.canton.{HasActorSystem, HasExecutionContext, SynchronizerAlias} +import org.lfdecentralizedtrust.splice.codegen.java.da.time.types.RelTime import org.lfdecentralizedtrust.splice.codegen.java.splice import org.lfdecentralizedtrust.splice.codegen.java.splice.amulet.{ Amulet, @@ -12,84 +23,44 @@ import org.lfdecentralizedtrust.splice.codegen.java.splice.amuletrules.{ AmuletRules_BuyMemberTrafficResult, AmuletRules_MintResult, } +import org.lfdecentralizedtrust.splice.codegen.java.splice.ans.AnsEntry import org.lfdecentralizedtrust.splice.codegen.java.splice.decentralizedsynchronizer.MemberTraffic +import org.lfdecentralizedtrust.splice.codegen.java.splice.dso.decentralizedsynchronizer as decentralizedsynchronizerCodegen +import org.lfdecentralizedtrust.splice.codegen.java.splice.dsorules.{DsoRules, Reason, Vote} import org.lfdecentralizedtrust.splice.codegen.java.splice.types.Round import org.lfdecentralizedtrust.splice.codegen.java.splice.validatorlicense.FaucetState import org.lfdecentralizedtrust.splice.codegen.java.splice.{ amulet as amuletCodegen, - round as roundCodegen, -} -import org.lfdecentralizedtrust.splice.codegen.java.splice.ans.AnsEntry -import org.lfdecentralizedtrust.splice.codegen.java.splice.{ cometbft as cometbftCodegen, dsorules as dsorulesCodegen, + round as roundCodegen, } -import org.lfdecentralizedtrust.splice.codegen.java.splice.dso.decentralizedsynchronizer as decentralizedsynchronizerCodegen -import org.lfdecentralizedtrust.splice.codegen.java.da.time.types.RelTime -import org.lfdecentralizedtrust.splice.codegen.java.splice.dsorules.{DsoRules, Reason, Vote} import org.lfdecentralizedtrust.splice.environment.{DarResources, RetryProvider} -import org.lfdecentralizedtrust.splice.history.{ - AmuletExpire, - ExternalPartyAmuletRules_CreateTransferCommand, - LockedAmuletExpireAmulet, - 
Transfer, - TransferCommand_Expire, - TransferCommand_Send, - TransferCommand_Withdraw, -} +import org.lfdecentralizedtrust.splice.history.* import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo import org.lfdecentralizedtrust.splice.scan.admin.api.client.commands.HttpScanAppClient -import org.lfdecentralizedtrust.splice.scan.store.{ - OpenMiningRoundTxLogEntry, - ReceiverAmount, - SenderAmount, - TransferCommandCreated, - TransferCommandExpired, - TransferCommandFailed, - TransferCommandSent, - TransferCommandTxLogEntry, - TransferCommandWithdrawn, - TransferTxLogEntry, -} -import org.lfdecentralizedtrust.splice.scan.store.ScanStore import org.lfdecentralizedtrust.splice.scan.store.db.{ DbScanStore, DbScanStoreMetrics, ScanAggregatesReader, ScanAggregator, } -import org.lfdecentralizedtrust.splice.store.{PageLimit, SortOrder, StoreErrors, StoreTest} +import org.lfdecentralizedtrust.splice.scan.store.* import org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.ContractState.Assigned +import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.lfdecentralizedtrust.splice.store.events.DsoRulesCloseVoteRequest +import org.lfdecentralizedtrust.splice.store.* import org.lfdecentralizedtrust.splice.util.SpliceUtil.damlDecimal -import org.lfdecentralizedtrust.splice.util.{ - Contract, - ContractWithState, - EventId, - ResourceTemplateDecoder, - TemplateJsonDecoder, -} -import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.crypto.Fingerprint -import com.digitalasset.canton.data.CantonTimestamp -import com.daml.metrics.api.noop.NoOpMetricsFactory -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown -import com.digitalasset.canton.resource.DbStorage -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.{HasActorSystem, HasExecutionContext, SynchronizerAlias} +import 
org.lfdecentralizedtrust.splice.util.* import java.time.Instant +import java.time.temporal.ChronoUnit import java.util.{Collections, Optional} -import scala.concurrent.Future +import scala.concurrent.{ExecutionContext, Future} import scala.jdk.CollectionConverters.* import scala.jdk.OptionConverters.* import scala.math.BigDecimal.javaBigDecimal2bigDecimal import scala.reflect.ClassTag -import com.digitalasset.canton.util.MonadUtil - -import java.time.temporal.ChronoUnit -import scala.concurrent.ExecutionContext abstract class ScanStoreTest extends StoreTest @@ -1832,24 +1803,26 @@ abstract class ScanStoreTest val recordTimeThird = now.plusSeconds(9).toInstant for { store <- mkStore() - _ <- store.updateHistory.ingestionSink.initialize() + updateHistory <- mkUpdateHistory(domainMigrationId) + _ <- updateHistory.ingestionSink.initialize() first <- dummyDomain.create( firstDsoRules, recordTime = recordTimeFirst, )( - store.updateHistory + updateHistory ) firstRecordTime = CantonTimestamp.fromInstant(first.getRecordTime).getOrElse(now) _ <- dummyDomain.create( secondDsoRules, recordTime = recordTimeSecond, - )(store.updateHistory) + )(updateHistory) _ <- dummyDomain.create( thirdDsoRules, recordTime = recordTimeThird, - )(store.updateHistory) + )(updateHistory) result <- store.lookupContractByRecordTime( DsoRules.COMPANION, + updateHistory, firstRecordTime.plusSeconds(1), ) } yield { @@ -1869,26 +1842,28 @@ abstract class ScanStoreTest val recordTimeThird = now.plusSeconds(9).toInstant for { store <- mkStore() - _ <- store.updateHistory.ingestionSink.initialize() + updateHistory <- mkUpdateHistory(domainMigrationId) + _ <- updateHistory.ingestionSink.initialize() first <- dummyDomain.create( firstAmuletRules, recordTime = recordTimeFirst, )( - store.updateHistory + updateHistory ) firstRecordTime = CantonTimestamp.fromInstant(first.getRecordTime).getOrElse(now) _ <- dummyDomain.create( secondAmuletRules, recordTime = recordTimeSecond, )( - store.updateHistory + 
updateHistory ) _ <- dummyDomain.create( thirdAmuletRules, recordTime = recordTimeThird, - )(store.updateHistory) + )(updateHistory) result <- store.lookupContractByRecordTime( AmuletRules.COMPANION, + updateHistory, firstRecordTime.plusSeconds(1), ) } yield { @@ -1904,6 +1879,10 @@ abstract class ScanStoreTest dsoParty: PartyId = dsoParty ): Future[ScanStore] + protected def mkUpdateHistory( + migrationId: Long + ): Future[UpdateHistory] + private lazy val user1 = userParty(1) private lazy val user2 = userParty(2) @@ -2353,7 +2332,6 @@ class DbScanStoreTest None, ), participantId = mkParticipantId("ScanStoreTest"), - enableImportUpdateBackfill = true, new DbScanStoreMetrics(new NoOpMetricsFactory(), loggerFactory, timeouts), initialRound = 0, )(parallelExecutionContext, implicitly, implicitly) @@ -2368,6 +2346,24 @@ class DbScanStoreTest } yield store } + override def mkUpdateHistory( + migrationId: Long + ): Future[UpdateHistory] = { + val updateHistory = new UpdateHistory( + storage.underlying, // not under test + new DomainMigrationInfo(migrationId, None), + "update_history_scan_store_test", + mkParticipantId("whatever"), + dsoParty, + BackfillingRequirement.BackfillingNotRequired, + loggerFactory, + enableissue12777Workaround = true, + enableImportUpdateBackfill = true, + HistoryMetrics(NoOpMetricsFactory, migrationId), + ) + updateHistory.ingestionSink.initialize().map(_ => updateHistory) + } + override protected def cleanDb( storage: DbStorage )(implicit traceContext: TraceContext): FutureUnlessShutdown[?] 
= diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala index a976dcff63..4b7b515057 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala @@ -129,6 +129,7 @@ class SplitwellApp( migrationInfo, participantId, ) + // splitwell does not need to have UpdateHistory automation = new SplitwellAutomationService( config.automation, clock, diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala index 195c3b3ba3..cb7ffbf855 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala @@ -65,7 +65,6 @@ class SplitwellAutomationService( ledgerClient, retryProvider, ingestFromParticipantBegin = true, - ingestUpdateHistoryFromParticipantBegin = true, params, ) { diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/db/DbSplitwellStore.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/db/DbSplitwellStore.scala index d5c3b800bb..c717173f25 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/db/DbSplitwellStore.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/db/DbSplitwellStore.scala @@ -31,7 +31,6 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId, 
SynchronizerId} import com.digitalasset.canton.tracing.TraceContext -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.lfdecentralizedtrust.splice.store.db.AcsQueries.AcsStoreId import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton @@ -66,9 +65,6 @@ class DbSplitwellStore( ), domainMigrationInfo = domainMigrationInfo, participantId = participantId, - enableissue12777Workaround = false, - enableImportUpdateBackfill = false, - BackfillingRequirement.BackfillingNotRequired, ) with AcsTables with AcsQueries diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala index 7651d9c456..0a6c543ce6 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala @@ -81,7 +81,6 @@ class SvDsoAutomationService( ledgerClient, retryProvider, config.ingestFromParticipantBegin, - config.ingestUpdateHistoryFromParticipantBegin, config.parameters, ) { @@ -89,6 +88,8 @@ class SvDsoAutomationService( : org.lfdecentralizedtrust.splice.sv.automation.SvDsoAutomationService.type = SvDsoAutomationService + // notice the absence of UpdateHistory: the history for the dso party is duplicate with Scan + private[splice] val restartDsoDelegateBasedAutomationTrigger = new RestartDsoDelegateBasedAutomationTrigger( triggerContext, @@ -438,6 +439,8 @@ class SvDsoAutomationService( new SequencerPruningTrigger( contextWithSpecificPolling, dsoStore, + config.scan, + upgradesConfig, sequencerContext.sequencerAdminConnection, sequencerContext.mediatorAdminConnection, clock, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala 
b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala index 9b1c98a434..2dde7aa68e 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala @@ -56,7 +56,6 @@ class SvSvAutomationService( ledgerClient, retryProvider, config.ingestFromParticipantBegin, - config.ingestUpdateHistoryFromParticipantBegin, config.parameters, ) { override def companion: org.lfdecentralizedtrust.splice.sv.automation.SvSvAutomationService.type = @@ -69,6 +68,8 @@ class SvSvAutomationService( ) ) + // notice the absence of UpdateHistory: the history for the sv party is not needed as we don't foresee ever adding TxLog for it + registerTrigger( SqlIndexInitializationTrigger( storage, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala index 460e0e67c8..572f96f26c 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala @@ -10,7 +10,7 @@ import org.lfdecentralizedtrust.splice.environment.{ SequencerAdminConnection, } import org.lfdecentralizedtrust.splice.sv.store.SvDsoStore -import org.lfdecentralizedtrust.splice.util.DomainRecordTimeRange +import org.lfdecentralizedtrust.splice.util.{DomainRecordTimeRange, TemplateJsonDecoder} import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, SyncCloseable} @@ -24,14 +24,25 @@ import io.opentelemetry.api.trace.Tracer import scala.jdk.DurationConverters.* import io.grpc.Status +import 
org.apache.pekko.stream.Materializer +import org.lfdecentralizedtrust.splice.config.{NetworkAppClientConfig, UpgradesConfig} +import org.lfdecentralizedtrust.splice.http.HttpClient +import org.lfdecentralizedtrust.splice.scan.admin.api.client.{ + BackfillingScanConnection, + ScanConnection, +} +import org.lfdecentralizedtrust.splice.scan.config.ScanAppClientConfig +import org.lfdecentralizedtrust.splice.sv.config.SvScanConfig -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.{ExecutionContextExecutor, Future} /** A trigger to periodically call the sequencer pruning command */ class SequencerPruningTrigger( override protected val context: TriggerContext, store: SvDsoStore, + scanConfig: Option[SvScanConfig], + upgradesConfig: UpgradesConfig, sequencerAdminConnection: SequencerAdminConnection, mediatorAdminConnection: MediatorAdminConnection, clock: Clock, @@ -39,19 +50,50 @@ class SequencerPruningTrigger( participantAdminConnection: ParticipantAdminConnection, migrationId: Long, )(implicit - override val ec: ExecutionContext, + override val ec: ExecutionContextExecutor, override val tracer: Tracer, + mat: Materializer, + httpClient: HttpClient, + templateDecoder: TemplateJsonDecoder, ) extends PollingTrigger { val pruningMetrics = new SequencerPruningMetrics( context.metricsFactory ) + // Similar to PublishScanConfigTrigger, this class creates its own scan connection + // on demand, because scan might not be available at application startup. + private def createScanConnection()(implicit + tc: TraceContext + ): Future[BackfillingScanConnection] = + scanConfig match { + case None => + Future.failed( + Status.UNAVAILABLE + .withDescription( + "This application is not configured to connect to a scan service. " + + " Check the application configuration or use the scan API to query votes information." 
+ ) + .asRuntimeException() + ) + case Some(scanConfig) => + ScanConnection + .singleUncached( + ScanAppClientConfig(NetworkAppClientConfig(scanConfig.internalUrl)), + upgradesConfig, + clock, + context.retryProvider, + loggerFactory, + retryConnectionOnInitialFailure = true, + ) + } + override def performWorkIfAvailable()(implicit traceContext: TraceContext): Future[Boolean] = for { synchronizerId <- sequencerAdminConnection.getStatus.map(_.trySuccess.synchronizerId) - recordTimeRangeO <- store.updateHistory - .getRecordTimeRange(migrationId) - .map(_.get(synchronizerId)) + scanConnection <- createScanConnection() + recordTimeRangeO <- scanConnection + .getMigrationInfo(migrationId) + .map(_.flatMap(_.recordTimeRange.get(synchronizerId))) _ <- recordTimeRangeO match { case Some(DomainRecordTimeRange(earliest, latest)) if (latest - earliest).compareTo(retentionPeriod.asJava) > 0 => diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvDsoStore.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvDsoStore.scala index 908d78ada7..eba85a93cb 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvDsoStore.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvDsoStore.scala @@ -60,7 +60,6 @@ import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.toSQLAc import com.digitalasset.canton.topology.{Member, ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import io.grpc.Status -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import slick.jdbc.GetResult import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton import slick.jdbc.canton.SQLActionBuilder @@ -98,9 +97,6 @@ class DbSvDsoStore( ), domainMigrationInfo, participantId, - enableissue12777Workaround = false, - enableImportUpdateBackfill = false, - 
BackfillingRequirement.BackfillingNotRequired, ) with SvDsoStore with AcsTables diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala index c6d3814464..e88ce12c72 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/db/DbSvSvStore.scala @@ -20,7 +20,6 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.tracing.TraceContext -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.lfdecentralizedtrust.splice.store.db.AcsQueries.AcsStoreId import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton @@ -55,9 +54,6 @@ class DbSvSvStore( ), domainMigrationInfo = domainMigrationInfo, participantId = participantId, - enableissue12777Workaround = false, - enableImportUpdateBackfill = false, - BackfillingRequirement.BackfillingNotRequired, ) with SvSvStore with AcsTables diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala index e986cccc38..f4f89e6629 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala @@ -42,7 +42,7 @@ import org.lfdecentralizedtrust.splice.setup.{ ParticipantInitializer, ParticipantPartyMigrator, } -import org.lfdecentralizedtrust.splice.store.{AppStoreWithIngestion, UpdateHistory} +import org.lfdecentralizedtrust.splice.store.{AppStoreWithIngestion, HistoryMetrics, UpdateHistory} import 
org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.QueryResult import org.lfdecentralizedtrust.splice.util.{ AmuletConfigSchedule, @@ -87,7 +87,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.ledger.api.util.DurationConversion import com.digitalasset.canton.lifecycle.LifeCycle import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.{DbStorage, Storage} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{PartyId, SynchronizerId} import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} @@ -102,6 +102,7 @@ import org.apache.pekko.http.scaladsl.server.Directives.* import org.apache.pekko.http.scaladsl.server.directives.BasicDirectives import com.google.protobuf.ByteString import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority +import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import scala.concurrent.{ExecutionContextExecutor, Future} import scala.util.{Failure, Success} @@ -111,7 +112,7 @@ class ValidatorApp( override val name: InstanceName, val config: ValidatorAppBackendConfig, val amuletAppParameters: SharedSpliceAppParameters, - storage: Storage, + storage: DbStorage, override protected val clock: Clock, val loggerFactory: NamedLoggerFactory, tracerProvider: TracerProvider, @@ -740,6 +741,18 @@ class ValidatorApp( domainMigrationInfo, participantId, ) + validatorUpdateHistory = new UpdateHistory( + storage, + domainMigrationInfo, + store.storeName, + participantId, + store.acsContractFilter.ingestionFilter.primaryParty, + BackfillingRequirement.BackfillingNotRequired, + loggerFactory, + enableissue12777Workaround = false, + enableImportUpdateBackfill = false, + HistoryMetrics(retryProvider.metricsFactory, domainMigrationInfo.currentMigrationId), + ) domainTimeAutomationService = new 
DomainTimeAutomationService( config.domains.global.alias, participantAdminConnection, @@ -781,7 +794,7 @@ class ValidatorApp( clock, domainTimeAutomationService.domainTimeSync, domainParamsAutomationService.domainUnpausedSync, - storage: Storage, + storage, retryProvider, loggerFactory, domainMigrationInfo, @@ -835,6 +848,7 @@ class ValidatorApp( domainParamsAutomationService.domainUnpausedSync, walletManagerOpt, store, + validatorUpdateHistory, storage, scanConnection, ledgerClient, diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala index 10014318ae..574081706e 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala @@ -7,6 +7,7 @@ import org.lfdecentralizedtrust.splice.automation.{ AutomationServiceCompanion, SpliceAppAutomationService, SqlIndexInitializationTrigger, + UpdateIngestionService, } import org.lfdecentralizedtrust.splice.config.{ AutomationConfig, @@ -19,6 +20,7 @@ import org.lfdecentralizedtrust.splice.scan.admin.api.client.BftScanConnection import org.lfdecentralizedtrust.splice.store.{ DomainTimeSynchronization, DomainUnpausedSynchronization, + UpdateHistory, } import org.lfdecentralizedtrust.splice.util.QualifiedName import org.lfdecentralizedtrust.splice.validator.domain.DomainConnector @@ -59,6 +61,7 @@ class ValidatorAutomationService( domainUnpausedSync: DomainUnpausedSynchronization, walletManagerOpt: Option[UserWalletManager], // None when config.enableWallet=false store: ValidatorStore, + val updateHistory: UpdateHistory, storage: Storage, scanConnection: BftScanConnection, ledgerClient: SpliceLedgerClient, @@ -91,13 +94,25 @@ class 
ValidatorAutomationService( ledgerClient, retryProvider, ingestFromParticipantBegin, - ingestUpdateHistoryFromParticipantBegin, params, ) { override def companion : org.lfdecentralizedtrust.splice.validator.automation.ValidatorAutomationService.type = ValidatorAutomationService + registerService( + new UpdateIngestionService( + updateHistory.getClass.getSimpleName, + updateHistory.ingestionSink, + connection(SpliceLedgerConnectionPriority.High), + automationConfig, + backoffClock = triggerContext.pollingClock, + triggerContext.retryProvider, + triggerContext.loggerFactory, + ingestUpdateHistoryFromParticipantBegin, + ) + ) + automationConfig.topologyMetricsPollingInterval.foreach(topologyPollingInterval => registerTrigger( new TopologyMetricsTrigger( diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/db/DbValidatorStore.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/db/DbValidatorStore.scala index e2c061f894..a40c6caea5 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/db/DbValidatorStore.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/db/DbValidatorStore.scala @@ -42,7 +42,6 @@ import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import org.lfdecentralizedtrust.splice.automation.MultiDomainExpiredContractTrigger.ListExpiredContracts -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.lfdecentralizedtrust.splice.store.db.AcsQueries.AcsStoreId import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton @@ -77,9 +76,6 @@ class DbValidatorStore( ), domainMigrationInfo = domainMigrationInfo, participantId = participantId, - enableissue12777Workaround = false, - enableImportUpdateBackfill = false, - 
BackfillingRequirement.BackfillingNotRequired, ) with ValidatorStore with AcsTables diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletManager.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletManager.scala index 8dcca6170e..e934a8164d 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletManager.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletManager.scala @@ -16,7 +16,7 @@ import org.lfdecentralizedtrust.splice.util.{HasHealth, TemplateJsonDecoder} import org.lfdecentralizedtrust.splice.wallet.store.{ExternalPartyWalletStore, WalletStore} import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext @@ -35,7 +35,7 @@ class ExternalPartyWalletManager( private[splice] val clock: Clock, domainTimeSync: DomainTimeSynchronization, domainUnpausedSync: DomainUnpausedSynchronization, - storage: Storage, + storage: DbStorage, retryProvider: RetryProvider, override val loggerFactory: NamedLoggerFactory, domainMigrationInfo: DomainMigrationInfo, diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletService.scala index 2a77835b13..410e816f15 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/ExternalPartyWalletService.scala @@ -9,17 +9,20 @@ import org.lfdecentralizedtrust.splice.migration.DomainMigrationInfo 
import org.lfdecentralizedtrust.splice.store.{ DomainTimeSynchronization, DomainUnpausedSynchronization, + HistoryMetrics, + UpdateHistory, } import org.lfdecentralizedtrust.splice.util.{HasHealth, TemplateJsonDecoder} import org.lfdecentralizedtrust.splice.wallet.automation.ExternalPartyWalletAutomationService import org.lfdecentralizedtrust.splice.wallet.store.ExternalPartyWalletStore import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.ParticipantId import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer +import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import scala.concurrent.ExecutionContext @@ -31,7 +34,7 @@ class ExternalPartyWalletService( clock: Clock, domainTimeSync: DomainTimeSynchronization, domainUnpausedSync: DomainUnpausedSynchronization, - storage: Storage, + storage: DbStorage, override protected[this] val retryProvider: RetryProvider, override val loggerFactory: NamedLoggerFactory, domainMigrationInfo: DomainMigrationInfo, @@ -60,8 +63,22 @@ class ExternalPartyWalletService( participantId, ) + val updateHistory = new UpdateHistory( + storage, + domainMigrationInfo, + store.storeName, + participantId, + store.acsContractFilter.ingestionFilter.primaryParty, + BackfillingRequirement.BackfillingNotRequired, + loggerFactory, + enableissue12777Workaround = false, + enableImportUpdateBackfill = false, + HistoryMetrics(retryProvider.metricsFactory, domainMigrationInfo.currentMigrationId), + ) + val automation = new ExternalPartyWalletAutomationService( store, + updateHistory, ledgerClient, automationConfig, clock, @@ -79,6 +96,7 @@ class ExternalPartyWalletService( override def onClosed(): Unit = { automation.close() + 
updateHistory.close() store.close() super.onClosed() } diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletManager.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletManager.scala index 54d5a0f542..3fb7c8d4ca 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletManager.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletManager.scala @@ -31,7 +31,7 @@ import org.lfdecentralizedtrust.splice.wallet.store.{UserWalletStore, WalletStor import org.lfdecentralizedtrust.splice.wallet.util.ValidatorTopupConfig import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext @@ -53,7 +53,7 @@ class UserWalletManager( domainTimeSync: DomainTimeSynchronization, domainUnpausedSync: DomainUnpausedSynchronization, treasuryConfig: TreasuryConfig, - storage: Storage, + storage: DbStorage, retryProvider: RetryProvider, scanConnection: BftScanConnection, packageVersionSupport: PackageVersionSupport, diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala index ac8720a9e5..3c5a88eedd 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/UserWalletService.scala @@ -11,6 +11,8 @@ import org.lfdecentralizedtrust.splice.scan.admin.api.client.BftScanConnection import org.lfdecentralizedtrust.splice.store.{ DomainTimeSynchronization, DomainUnpausedSynchronization, + HistoryMetrics, + 
UpdateHistory, } import org.lfdecentralizedtrust.splice.util.{HasHealth, SpliceCircuitBreaker, TemplateJsonDecoder} import org.lfdecentralizedtrust.splice.wallet.automation.UserWalletAutomationService @@ -24,13 +26,14 @@ import org.lfdecentralizedtrust.splice.wallet.treasury.TreasuryService import org.lfdecentralizedtrust.splice.wallet.util.ValidatorTopupConfig import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.ParticipantId import io.opentelemetry.api.trace.Tracer import org.apache.pekko.actor.Scheduler import org.apache.pekko.stream.Materializer import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority +import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import scala.concurrent.ExecutionContext @@ -44,7 +47,7 @@ class UserWalletService( domainTimeSync: DomainTimeSynchronization, domainUnpausedSync: DomainUnpausedSynchronization, treasuryConfig: TreasuryConfig, - storage: Storage, + storage: DbStorage, override protected[this] val retryProvider: RetryProvider, override val loggerFactory: NamedLoggerFactory, scanConnection: BftScanConnection, @@ -83,6 +86,20 @@ class UserWalletService( participantId, ) + val updateHistory: UpdateHistory = + new UpdateHistory( + storage, + domainMigrationInfo, + store.storeName, + participantId, + store.acsContractFilter.ingestionFilter.primaryParty, + BackfillingRequirement.BackfillingNotRequired, + loggerFactory, + enableissue12777Workaround = true, + enableImportUpdateBackfill = false, + HistoryMetrics(retryProvider.metricsFactory, domainMigrationInfo.currentMigrationId), + ) + val treasury: TreasuryService = new TreasuryService( // The treasury gets its own connection, and is required to 
manage waiting for the store on its own. ledgerClient.connection( @@ -106,6 +123,7 @@ class UserWalletService( val automation = new UserWalletAutomationService( store, + updateHistory, treasury, ledgerClient, automationConfig, diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala index 2e9a698797..570cedf90b 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala @@ -6,6 +6,7 @@ package org.lfdecentralizedtrust.splice.wallet.automation import org.lfdecentralizedtrust.splice.automation.{ AutomationServiceCompanion, SpliceAppAutomationService, + UpdateIngestionService, } import AutomationServiceCompanion.TriggerClass import org.lfdecentralizedtrust.splice.config.{AutomationConfig, SpliceParametersConfig} @@ -13,17 +14,20 @@ import org.lfdecentralizedtrust.splice.environment.* import org.lfdecentralizedtrust.splice.store.{ DomainTimeSynchronization, DomainUnpausedSynchronization, + UpdateHistory, } import org.lfdecentralizedtrust.splice.wallet.store.ExternalPartyWalletStore import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.time.Clock import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer +import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import scala.concurrent.ExecutionContext class ExternalPartyWalletAutomationService( store: ExternalPartyWalletStore, + updateHistory: UpdateHistory, ledgerClient: SpliceLedgerClient, automationConfig: AutomationConfig, clock: Clock, @@ -47,12 +51,24 @@ class ExternalPartyWalletAutomationService( ledgerClient, 
retryProvider, ingestFromParticipantBegin, - ingestUpdateHistoryFromParticipantBegin, params, ) { override def companion : org.lfdecentralizedtrust.splice.wallet.automation.ExternalPartyWalletAutomationService.type = ExternalPartyWalletAutomationService + + registerService( + new UpdateIngestionService( + updateHistory.getClass.getSimpleName, + updateHistory.ingestionSink, + connection(SpliceLedgerConnectionPriority.High), + automationConfig, + backoffClock = triggerContext.pollingClock, + triggerContext.retryProvider, + triggerContext.loggerFactory, + ingestUpdateHistoryFromParticipantBegin, + ) + ) } object ExternalPartyWalletAutomationService extends AutomationServiceCompanion { diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala index a15b0d2210..e639668386 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala @@ -10,6 +10,7 @@ import org.lfdecentralizedtrust.splice.automation.{ TransferFollowTrigger, TxLogBackfillingTrigger, UnassignTrigger, + UpdateIngestionService, } import AutomationServiceCompanion.{TriggerClass, aTrigger} import org.lfdecentralizedtrust.splice.config.{AutomationConfig, SpliceParametersConfig} @@ -19,6 +20,7 @@ import org.lfdecentralizedtrust.splice.scan.admin.api.client.BftScanConnection import org.lfdecentralizedtrust.splice.store.{ DomainTimeSynchronization, DomainUnpausedSynchronization, + UpdateHistory, } import org.lfdecentralizedtrust.splice.util.QualifiedName import org.lfdecentralizedtrust.splice.wallet.config.{AutoAcceptTransfersConfig, WalletSweepConfig} @@ -35,6 +37,7 @@ import scala.concurrent.ExecutionContext class UserWalletAutomationService( store: 
UserWalletStore, + val updateHistory: UpdateHistory, treasury: TreasuryService, ledgerClient: SpliceLedgerClient, automationConfig: AutomationConfig, @@ -67,13 +70,25 @@ class UserWalletAutomationService( ledgerClient, retryProvider, ingestFromParticipantBegin, - ingestUpdateHistoryFromParticipantBegin, paramsConfig, ) { override def companion : org.lfdecentralizedtrust.splice.wallet.automation.UserWalletAutomationService.type = UserWalletAutomationService + registerService( + new UpdateIngestionService( + updateHistory.getClass.getSimpleName, + updateHistory.ingestionSink, + connection(SpliceLedgerConnectionPriority.High), + automationConfig, + backoffClock = triggerContext.pollingClock, + triggerContext.retryProvider, + triggerContext.loggerFactory, + ingestUpdateHistoryFromParticipantBegin, + ) + ) + registerTrigger( new ExpireTransferOfferTrigger( triggerContext, @@ -182,6 +197,7 @@ class UserWalletAutomationService( registerTrigger( new TxLogBackfillingTrigger( store, + updateHistory, txLogBackfillingBatchSize, triggerContext, ) diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbExternalPartyWalletStore.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbExternalPartyWalletStore.scala index acbc993afd..0e64ddb202 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbExternalPartyWalletStore.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbExternalPartyWalletStore.scala @@ -20,7 +20,6 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.topology.ParticipantId -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import scala.concurrent.* @@ -52,9 +51,6 @@ class DbExternalPartyWalletStore( ), domainMigrationInfo, participantId, - 
enableissue12777Workaround = false, - enableImportUpdateBackfill = false, - BackfillingRequirement.BackfillingNotRequired, ) with ExternalPartyWalletStore with AcsTables @@ -64,10 +60,9 @@ class DbExternalPartyWalletStore( override def toString: String = show"DbExternalPartyWalletStore(externalParty=${key.externalParty})" - override protected def acsContractFilter - : org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.ContractFilter[ - org.lfdecentralizedtrust.splice.wallet.store.db.WalletTables.ExternalPartyWalletAcsStoreRowData, - AcsInterfaceViewRowData.NoInterfacesIngested, - ] = ExternalPartyWalletStore.contractFilter(key) + override def acsContractFilter: org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.ContractFilter[ + org.lfdecentralizedtrust.splice.wallet.store.db.WalletTables.ExternalPartyWalletAcsStoreRowData, + AcsInterfaceViewRowData.NoInterfacesIngested, + ] = ExternalPartyWalletStore.contractFilter(key) } diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbUserWalletStore.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbUserWalletStore.scala index 1b7e086d04..30a090a121 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbUserWalletStore.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/db/DbUserWalletStore.scala @@ -44,7 +44,6 @@ import com.digitalasset.canton.util.ShowUtil.* import slick.jdbc.canton.ActionBasedSQLInterpolation.Implicits.actionBasedSQLInterpolationCanton import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.toSQLActionBuilderChain import com.digitalasset.canton.topology.{ParticipantId, PartyId} -import org.lfdecentralizedtrust.splice.store.UpdateHistory.BackfillingRequirement import org.lfdecentralizedtrust.splice.store.db.TxLogQueries.TxLogStoreId import slick.jdbc.canton.SQLActionBuilder @@ -97,9 +96,6 @@ class DbUserWalletStore( ), domainMigrationInfo, 
participantId, - enableissue12777Workaround = true, - enableImportUpdateBackfill = false, - BackfillingRequirement.BackfillingNotRequired, ) with UserWalletStore with AcsTables @@ -116,7 +112,7 @@ class DbUserWalletStore( override def toString: String = show"DbUserWalletStore(endUserParty=${key.endUserParty})" - override protected def acsContractFilter + override def acsContractFilter : org.lfdecentralizedtrust.splice.store.MultiDomainAcsStore.ContractFilter[ org.lfdecentralizedtrust.splice.wallet.store.db.WalletTables.UserWalletAcsStoreRowData, org.lfdecentralizedtrust.splice.wallet.store.db.WalletTables.UserWalletAcsInterfaceViewRowData, From ad2b047504c329a7b6cf115970fac1b8725a8240 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Mon, 13 Oct 2025 15:08:38 +0000 Subject: [PATCH 02/10] [ci] when has lazy not fixed a npe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz --- .../org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala index de26656d8b..9196061cfd 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/db/DbAppStore.scala @@ -90,7 +90,7 @@ abstract class DbAppStore( handleIngestionSummary, ) - override val storeName: String = multiDomainAcsStore.storeName + override lazy val storeName: String = multiDomainAcsStore.storeName override lazy val domains: InMemorySynchronizerStore = new InMemorySynchronizerStore( From 3aefc005748dc064410c0c11a9c35a0425caac23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Tue, 14 Oct 2025 12:25:19 +0000 Subject: [PATCH 03/10] [ci] remove register duplicates MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz --- .../SpliceAppAutomationService.scala | 19 +++++++++++++++++++ .../automation/ScanAutomationService.scala | 14 +++----------- .../ValidatorAutomationService.scala | 14 +++----------- ...ExternalPartyWalletAutomationService.scala | 14 +++----------- .../UserWalletAutomationService.scala | 14 +++----------- 5 files changed, 31 insertions(+), 44 deletions(-) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala index 0e5c6b5c95..6051e8b677 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SpliceAppAutomationService.scala @@ -15,6 +15,7 @@ import org.lfdecentralizedtrust.splice.store.{ AppStoreWithIngestion, DomainTimeSynchronization, DomainUnpausedSynchronization, + UpdateHistory, } import com.digitalasset.canton.time.{Clock, WallClock} import com.digitalasset.canton.tracing.TraceContext @@ -96,6 +97,24 @@ abstract class SpliceAppAutomationService[Store <: AppStore]( case SpliceLedgerConnectionPriority.AmuletExpiry => amuletExpiryConnection } + final protected def registerUpdateHistoryIngestion( + updateHistory: UpdateHistory, + ingestUpdateHistoryFromParticipantBegin: Boolean, + ): Unit = { + registerService( + new UpdateIngestionService( + updateHistory.getClass.getSimpleName, + updateHistory.ingestionSink, + connection(SpliceLedgerConnectionPriority.High), + automationConfig, + backoffClock = triggerContext.pollingClock, + triggerContext.retryProvider, + triggerContext.loggerFactory, + ingestUpdateHistoryFromParticipantBegin, + ) + ) + } + private def completionOffsetCallback(offset: Long): Future[Unit] = 
store.multiDomainAcsStore.signalWhenIngestedOrShutdown(offset)(TraceContext.empty) diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala index 548e250933..86053986a5 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala @@ -75,17 +75,9 @@ class ScanAutomationService( new ScanBackfillAggregatesTrigger(store, triggerContext, initialRound) ) - registerService( - new UpdateIngestionService( - updateHistory.getClass.getSimpleName, - updateHistory.ingestionSink, - connection(SpliceLedgerConnectionPriority.High), - automationConfig, - backoffClock = triggerContext.pollingClock, - triggerContext.retryProvider, - triggerContext.loggerFactory, - ingestUpdateHistoryFromParticipantBegin, - ) + registerUpdateHistoryIngestion( + updateHistory, + ingestUpdateHistoryFromParticipantBegin, ) if (config.updateHistoryBackfillEnabled) { diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala index 574081706e..417fb57bd6 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala @@ -100,17 +100,9 @@ class ValidatorAutomationService( : org.lfdecentralizedtrust.splice.validator.automation.ValidatorAutomationService.type = ValidatorAutomationService - registerService( - new UpdateIngestionService( - updateHistory.getClass.getSimpleName, - updateHistory.ingestionSink, - 
connection(SpliceLedgerConnectionPriority.High), - automationConfig, - backoffClock = triggerContext.pollingClock, - triggerContext.retryProvider, - triggerContext.loggerFactory, - ingestUpdateHistoryFromParticipantBegin, - ) + registerUpdateHistoryIngestion( + updateHistory, + ingestUpdateHistoryFromParticipantBegin, ) automationConfig.topologyMetricsPollingInterval.foreach(topologyPollingInterval => diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala index 570cedf90b..5a582cfdfa 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala @@ -57,17 +57,9 @@ class ExternalPartyWalletAutomationService( : org.lfdecentralizedtrust.splice.wallet.automation.ExternalPartyWalletAutomationService.type = ExternalPartyWalletAutomationService - registerService( - new UpdateIngestionService( - updateHistory.getClass.getSimpleName, - updateHistory.ingestionSink, - connection(SpliceLedgerConnectionPriority.High), - automationConfig, - backoffClock = triggerContext.pollingClock, - triggerContext.retryProvider, - triggerContext.loggerFactory, - ingestUpdateHistoryFromParticipantBegin, - ) + registerUpdateHistoryIngestion( + updateHistory, + ingestUpdateHistoryFromParticipantBegin, ) } diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala index e639668386..d3fa963311 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala +++ 
b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala @@ -76,17 +76,9 @@ class UserWalletAutomationService( : org.lfdecentralizedtrust.splice.wallet.automation.UserWalletAutomationService.type = UserWalletAutomationService - registerService( - new UpdateIngestionService( - updateHistory.getClass.getSimpleName, - updateHistory.ingestionSink, - connection(SpliceLedgerConnectionPriority.High), - automationConfig, - backoffClock = triggerContext.pollingClock, - triggerContext.retryProvider, - triggerContext.loggerFactory, - ingestUpdateHistoryFromParticipantBegin, - ) + registerUpdateHistoryIngestion( + updateHistory, + ingestUpdateHistoryFromParticipantBegin, ) registerTrigger( From 695a66e034562a3b64da0a2f6b9a44671bdaa712 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Tue, 14 Oct 2025 12:58:34 +0000 Subject: [PATCH 04/10] [ci] DbStorage errywhere MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz --- .../SqlIndexInitializationTrigger.scala | 20 ++++----- .../splice/store/UpdateHistory.scala | 22 ++++------ .../automation/ScanAutomationService.scala | 6 +-- .../splice/scan/store/AcsSnapshotStore.scala | 10 ++--- .../splice/scan/store/ScanStore.scala | 44 +++++++++---------- .../scan/store/db/DbScanVerdictStore.scala | 8 +--- .../splice/splitwell/SplitwellApp.scala | 6 +-- .../SplitwellAutomationService.scala | 4 +- .../splitwell/store/SplitwellStore.scala | 26 +++++------ .../splice/sv/SvApp.scala | 6 +-- .../sv/automation/SvSvAutomationService.scala | 4 +- .../sv/onboarding/NodeInitializerUtil.scala | 4 +- .../DomainMigrationInitializer.scala | 4 +- .../joining/JoiningNodeInitializer.scala | 4 +- .../sv/onboarding/sv1/SV1Initializer.scala | 4 +- .../splice/sv/store/SvDsoStore.scala | 24 +++++----- .../splice/sv/store/SvSvStore.scala | 10 ++--- .../ValidatorAutomationService.scala | 5 +-- 
.../validator/store/ValidatorStore.scala | 24 +++++----- ...ExternalPartyWalletAutomationService.scala | 2 - .../UserWalletAutomationService.scala | 1 - .../store/ExternalPartyWalletStore.scala | 24 +++++----- .../splice/wallet/store/UserWalletStore.scala | 24 +++++----- 23 files changed, 118 insertions(+), 168 deletions(-) diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SqlIndexInitializationTrigger.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SqlIndexInitializationTrigger.scala index 273d6b6333..543ccbe01d 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SqlIndexInitializationTrigger.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/automation/SqlIndexInitializationTrigger.scala @@ -8,7 +8,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.discard.Implicits.DiscardOps import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, *} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.tracing.TraceContext import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer @@ -150,23 +150,19 @@ class SqlIndexInitializationTrigger( object SqlIndexInitializationTrigger { def apply( - storage: Storage, + storage: DbStorage, triggerContext: TriggerContext, indexActions: List[IndexAction] = defaultIndexActions, )(implicit ec: ExecutionContextExecutor, tracer: Tracer, mat: Materializer, - ): SqlIndexInitializationTrigger = storage match { - case dbStorage: DbStorage => - new SqlIndexInitializationTrigger( - dbStorage, - triggerContext, - indexActions, - ) - case storageType => - // Same behavior as in `ScanStore.apply` and similar - we only really support DbStorage in our apps. 
- throw new RuntimeException(s"Unsupported storage type $storageType") + ): SqlIndexInitializationTrigger = { + new SqlIndexInitializationTrigger( + storage, + triggerContext, + indexActions, + ) } sealed trait IndexStatus diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala index c44260490c..45b5de7e4e 100644 --- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala +++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/store/UpdateHistory.scala @@ -45,7 +45,7 @@ import com.digitalasset.canton.config.CantonRequireTypes.String256M import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.{Storage, DbStorage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.google.protobuf.ByteString @@ -2212,25 +2212,21 @@ object UpdateHistory { // Since we're interested in the highest known migration id, we don't need to filter by anything // (store ID, participant ID, etc. are not even known at the time we want to call this). 
def getHighestKnownMigrationId( - storage: Storage + storage: DbStorage )(implicit ec: ExecutionContext, closeContext: CloseContext, tc: TraceContext, ): Future[Option[Long]] = { - storage match { - case storage: DbStorage => - for { - queryResult <- storage.query( - sql""" + for { + queryResult <- storage.query( + sql""" select max(migration_id) from update_history_last_ingested_offsets """.as[Option[Long]], - "getHighestKnownMigrationId", - ) - } yield { - queryResult.headOption.flatten - } - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") + "getHighestKnownMigrationId", + ) + } yield { + queryResult.headOption.flatten } } diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala index 86053986a5..e16e11ec90 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/automation/ScanAutomationService.scala @@ -9,7 +9,6 @@ import org.lfdecentralizedtrust.splice.automation.{ SpliceAppAutomationService, SqlIndexInitializationTrigger, TxLogBackfillingTrigger, - UpdateIngestionService, } import org.lfdecentralizedtrust.splice.config.UpgradesConfig import org.lfdecentralizedtrust.splice.environment.{RetryProvider, SpliceLedgerClient} @@ -23,11 +22,10 @@ import org.lfdecentralizedtrust.splice.store.{ import org.lfdecentralizedtrust.splice.scan.store.{AcsSnapshotStore, ScanStore} import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.PartyId import io.opentelemetry.api.trace.Tracer -import 
org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import scala.concurrent.ExecutionContextExecutor @@ -40,7 +38,7 @@ class ScanAutomationService( protected val loggerFactory: NamedLoggerFactory, store: ScanStore, val updateHistory: UpdateHistory, - storage: Storage, + storage: DbStorage, snapshotStore: AcsSnapshotStore, ingestFromParticipantBegin: Boolean, ingestUpdateHistoryFromParticipantBegin: Boolean, diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala index b97f47860e..c442d7b577 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/AcsSnapshotStore.scala @@ -20,7 +20,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.resource.DbStorage.Implicits.BuilderChain.toSQLActionBuilderChain import com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.tracing.TraceContext @@ -482,16 +482,12 @@ object AcsSnapshotStore { } def apply( - storage: Storage, + storage: DbStorage, updateHistory: UpdateHistory, dsoParty: PartyId, migrationId: Long, loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext, closeContext: CloseContext): AcsSnapshotStore = - storage match { - case db: DbStorage => - new AcsSnapshotStore(db, updateHistory, dsoParty, migrationId, loggerFactory) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new 
AcsSnapshotStore(storage, updateHistory, dsoParty, migrationId, loggerFactory) } diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala index 9788b37d47..9639691032 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/ScanStore.scala @@ -8,7 +8,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{Member, ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Time.Timestamp @@ -305,7 +305,7 @@ object ScanStore { def apply( key: ScanStore.Key, - storage: Storage, + storage: DbStorage, isFirstSv: Boolean, loggerFactory: NamedLoggerFactory, retryProvider: RetryProvider, @@ -320,28 +320,24 @@ object ScanStore { templateJsonDecoder: TemplateJsonDecoder, close: CloseContext, ): ScanStore = { - storage match { - case db: DbStorage => - new CachingScanStore( - loggerFactory, - retryProvider, - new DbScanStore( - key = key, - db, - isFirstSv, - loggerFactory, - retryProvider, - createScanAggregatesReader, - domainMigrationInfo, - participantId, - metrics, - initialRound, - ), - cacheConfigs, - metrics, - ) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new CachingScanStore( + loggerFactory, + retryProvider, + new DbScanStore( + key = key, + storage, + isFirstSv, + loggerFactory, + retryProvider, + createScanAggregatesReader, + domainMigrationInfo, + participantId, + metrics, + 
initialRound, + ), + cacheConfigs, + metrics, + ) } def contractFilter( diff --git a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanVerdictStore.scala b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanVerdictStore.scala index d322cc7b0d..a1e5803dc0 100644 --- a/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanVerdictStore.scala +++ b/apps/scan/src/main/scala/org/lfdecentralizedtrust/splice/scan/store/db/DbScanVerdictStore.scala @@ -67,14 +67,10 @@ object DbScanVerdictStore { } def apply( - storage: com.digitalasset.canton.resource.Storage, + storage: com.digitalasset.canton.resource.DbStorage, loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext): DbScanVerdictStore = - storage match { - case db: DbStorage => new DbScanVerdictStore(db, loggerFactory) - case other => - throw new RuntimeException(s"Unsupported storage type $other for DbScanVerdictStore") - } + new DbScanVerdictStore(storage, loggerFactory) } class DbScanVerdictStore( diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala index 4b7b515057..cc4c9907ab 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/SplitwellApp.scala @@ -9,7 +9,7 @@ import com.digitalasset.canton.config.CantonRequireTypes.InstanceName import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.lifecycle.LifeCycle import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{PartyId, SynchronizerId} import 
com.digitalasset.canton.tracing.{TraceContext, TracerProvider} @@ -57,7 +57,7 @@ class SplitwellApp( override val name: InstanceName, val config: SplitwellAppBackendConfig, val amuletAppParameters: SharedSpliceAppParameters, - storage: Storage, + storage: DbStorage, override protected val clock: Clock, val loggerFactory: NamedLoggerFactory, tracerProvider: TracerProvider, @@ -251,7 +251,7 @@ class SplitwellApp( object SplitwellApp { case class State( automation: SplitwellAutomationService, - storage: Storage, + storage: DbStorage, store: SplitwellStore, scanConnection: ScanConnection, participantAdminConnection: ParticipantAdminConnection, diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala index cb7ffbf855..49ee539fdd 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/automation/SplitwellAutomationService.scala @@ -32,7 +32,7 @@ import org.lfdecentralizedtrust.splice.util.QualifiedName import org.lfdecentralizedtrust.splice.scan.admin.api.client.ScanConnection import org.lfdecentralizedtrust.splice.splitwell.store.SplitwellStore import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import io.opentelemetry.api.trace.Tracer import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority @@ -44,7 +44,7 @@ class SplitwellAutomationService( automationConfig: AutomationConfig, clock: Clock, store: SplitwellStore, - storage: Storage, + storage: DbStorage, ledgerClient: SpliceLedgerClient, scanConnection: ScanConnection, retryProvider: RetryProvider, 
diff --git a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/SplitwellStore.scala b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/SplitwellStore.scala index b639dc5f73..1df9c36b89 100644 --- a/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/SplitwellStore.scala +++ b/apps/splitwell/src/main/scala/org/lfdecentralizedtrust/splice/splitwell/store/SplitwellStore.scala @@ -21,7 +21,7 @@ import org.lfdecentralizedtrust.splice.util.{ import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import org.lfdecentralizedtrust.splice.store.db.AcsInterfaceViewRowData @@ -185,7 +185,7 @@ trait SplitwellStore extends AppStore { object SplitwellStore { def apply( key: Key, - storage: Storage, + storage: DbStorage, domainConfig: SplitwellSynchronizerConfig, loggerFactory: NamedLoggerFactory, retryProvider: RetryProvider, @@ -196,19 +196,15 @@ object SplitwellStore { templateJsonDecoder: TemplateJsonDecoder, close: CloseContext, ): SplitwellStore = - storage match { - case dbStorage: DbStorage => - new DbSplitwellStore( - key, - domainConfig, - dbStorage, - loggerFactory, - retryProvider, - domainMigrationInfo, - participantId, - ) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new DbSplitwellStore( + key, + domainConfig, + storage, + loggerFactory, + retryProvider, + domainMigrationInfo, + participantId, + ) case class Key( providerParty: PartyId diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala 
b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala index 55790fe56f..1ebe1a89c7 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala @@ -19,7 +19,7 @@ import com.digitalasset.canton.config.{ } import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, FlagCloseableAsync, SyncCloseable} import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger} -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.{TraceContext, TracerProvider} @@ -103,7 +103,7 @@ class SvApp( override val name: InstanceName, val config: SvAppBackendConfig, val amuletAppParameters: SharedSpliceAppParameters, - storage: Storage, + storage: DbStorage, override protected val clock: Clock, val loggerFactory: NamedLoggerFactory, tracerProvider: TracerProvider, @@ -776,7 +776,7 @@ object SvApp { case class State( participantAdminConnection: ParticipantAdminConnection, localSynchronizerNode: Option[LocalSynchronizerNode], - storage: Storage, + storage: DbStorage, domainTimeAutomationService: DomainTimeAutomationService, domainParamsAutomationService: DomainParamsAutomationService, svStore: SvSvStore, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala index 2dde7aa68e..a94806271e 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvSvAutomationService.scala @@ -23,7 +23,7 @@ import org.lfdecentralizedtrust.splice.sv.config.SvAppBackendConfig import 
org.lfdecentralizedtrust.splice.sv.store.{SvDsoStore, SvSvStore} import org.lfdecentralizedtrust.splice.sv.LocalSynchronizerNode import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import io.opentelemetry.api.trace.Tracer import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority @@ -37,7 +37,7 @@ class SvSvAutomationService( config: SvAppBackendConfig, svStore: SvSvStore, dsoStore: SvDsoStore, - storage: Storage, + storage: DbStorage, ledgerClient: SpliceLedgerClient, participantAdminConnection: ParticipantAdminConnection, localSynchronizerNode: Option[LocalSynchronizerNode], diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/NodeInitializerUtil.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/NodeInitializerUtil.scala index 801d38beb1..afcedea1e0 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/NodeInitializerUtil.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/NodeInitializerUtil.scala @@ -30,7 +30,7 @@ import org.lfdecentralizedtrust.splice.sv.store.{SvDsoStore, SvStore, SvSvStore} import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLogging -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.{Spanning, TraceContext} @@ -56,7 +56,7 @@ import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future} trait NodeInitializerUtil extends NamedLogging with Spanning with SynchronizerNodeConfigClient { protected val config: 
SvAppBackendConfig - protected val storage: Storage + protected val storage: DbStorage protected val retryProvider: RetryProvider protected val clock: Clock protected val domainTimeSync: DomainTimeSynchronization diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/domainmigration/DomainMigrationInitializer.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/domainmigration/DomainMigrationInitializer.scala index 9e464c12b3..b79727c853 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/domainmigration/DomainMigrationInitializer.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/domainmigration/DomainMigrationInitializer.scala @@ -54,7 +54,7 @@ import com.digitalasset.canton.admin.api.client.data.{NodeStatus, WaitingForInit import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.sequencing.SequencerConnections import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.{ParticipantId, SynchronizerId} @@ -85,7 +85,7 @@ class DomainMigrationInitializer( override protected val clock: Clock, override protected val domainTimeSync: DomainTimeSynchronization, override protected val domainUnpausedSync: DomainUnpausedSynchronization, - override protected val storage: Storage, + override protected val storage: DbStorage, override protected val loggerFactory: NamedLoggerFactory, override protected val retryProvider: RetryProvider, override protected val spliceInstanceNamesConfig: SpliceInstanceNamesConfig, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala 
b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala index 511b4860c5..1fc0200cbc 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/joining/JoiningNodeInitializer.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.config.SynchronizerTimeTrackerConfig import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.store.TopologyStoreId @@ -88,7 +88,7 @@ class JoiningNodeInitializer( override protected val clock: Clock, override protected val domainTimeSync: DomainTimeSynchronization, override protected val domainUnpausedSync: DomainUnpausedSynchronization, - override protected val storage: Storage, + override protected val storage: DbStorage, override val loggerFactory: NamedLoggerFactory, override protected val retryProvider: RetryProvider, override protected val spliceInstanceNamesConfig: SpliceInstanceNamesConfig, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala index 5a20056470..12207c86f9 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/sv1/SV1Initializer.scala @@ -56,7 +56,7 @@ import com.digitalasset.canton.lifecycle.CloseContext import 
com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.synchronizer.SynchronizerConnectionConfig import com.digitalasset.canton.protocol.DynamicSynchronizerParameters -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, SequencerConnections, @@ -110,7 +110,7 @@ class SV1Initializer( override protected val clock: Clock, override protected val domainTimeSync: DomainTimeSynchronization, override protected val domainUnpausedSync: DomainUnpausedSynchronization, - override protected val storage: Storage, + override protected val storage: DbStorage, override protected val retryProvider: RetryProvider, override protected val spliceInstanceNamesConfig: SpliceInstanceNamesConfig, override protected val loggerFactory: NamedLoggerFactory, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvDsoStore.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvDsoStore.scala index e40bc9d8aa..6e89979ba9 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvDsoStore.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvDsoStore.scala @@ -44,7 +44,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{Member, ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil @@ -969,7 +969,7 @@ trait SvDsoStore object SvDsoStore { def apply( key: SvStore.Key, - storage: Storage, + storage: DbStorage, loggerFactory: NamedLoggerFactory, 
retryProvider: RetryProvider, domainMigrationInfo: DomainMigrationInfo, @@ -979,18 +979,14 @@ object SvDsoStore { templateJsonDecoder: TemplateJsonDecoder, closeContext: CloseContext, ): SvDsoStore = { - storage match { - case db: DbStorage => - new DbSvDsoStore( - key, - db, - loggerFactory, - retryProvider, - domainMigrationInfo, - participantId, - ) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new DbSvDsoStore( + key, + storage, + loggerFactory, + retryProvider, + domainMigrationInfo, + participantId, + ) } /** Contract filter of an sv acs store for a specific acs party. */ diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvSvStore.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvSvStore.scala index 9fba164558..b16602958e 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvSvStore.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/store/SvSvStore.scala @@ -19,7 +19,7 @@ import org.lfdecentralizedtrust.splice.sv.store.db.SvTables.SvAcsStoreRowData import org.lfdecentralizedtrust.splice.util.{Contract, TemplateJsonDecoder} import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.tracing.TraceContext import org.lfdecentralizedtrust.splice.store.db.AcsInterfaceViewRowData @@ -93,7 +93,7 @@ trait SvSvStore extends AppStore { object SvSvStore { def apply( key: SvStore.Key, - storage: Storage, + storage: DbStorage, loggerFactory: NamedLoggerFactory, retryProvider: RetryProvider, domainMigrationInfo: DomainMigrationInfo, @@ -103,11 +103,7 @@ object SvSvStore { templateJsonDecoder: TemplateJsonDecoder, closeContext: CloseContext, ): SvSvStore = - storage match { - case db: 
DbStorage => - new DbSvSvStore(key, db, loggerFactory, retryProvider, domainMigrationInfo, participantId) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new DbSvSvStore(key, storage, loggerFactory, retryProvider, domainMigrationInfo, participantId) /** Contract filter of an sv acs store for a specific acs party. */ def contractFilter(key: SvStore.Key): MultiDomainAcsStore.ContractFilter[ diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala index 417fb57bd6..d0bf80fac6 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/automation/ValidatorAutomationService.scala @@ -7,7 +7,6 @@ import org.lfdecentralizedtrust.splice.automation.{ AutomationServiceCompanion, SpliceAppAutomationService, SqlIndexInitializationTrigger, - UpdateIngestionService, } import org.lfdecentralizedtrust.splice.config.{ AutomationConfig, @@ -37,7 +36,7 @@ import org.lfdecentralizedtrust.splice.wallet.util.ValidatorTopupConfig import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext import io.opentelemetry.api.trace.Tracer @@ -62,7 +61,7 @@ class ValidatorAutomationService( walletManagerOpt: Option[UserWalletManager], // None when config.enableWallet=false store: ValidatorStore, val updateHistory: UpdateHistory, - storage: Storage, + storage: DbStorage, scanConnection: BftScanConnection, ledgerClient: 
SpliceLedgerClient, participantAdminConnection: ParticipantAdminConnection, diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/ValidatorStore.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/ValidatorStore.scala index eee133f384..940ad355bf 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/ValidatorStore.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/store/ValidatorStore.scala @@ -27,7 +27,7 @@ import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId, SynchronizerId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.daml.lf.data.Time.Timestamp @@ -154,7 +154,7 @@ object ValidatorStore { def apply( key: Key, - storage: Storage, + storage: DbStorage, loggerFactory: NamedLoggerFactory, retryProvider: RetryProvider, domainMigrationInfo: DomainMigrationInfo, @@ -164,18 +164,14 @@ object ValidatorStore { templateJsonDecoder: TemplateJsonDecoder, closeContext: CloseContext, ): ValidatorStore = - storage match { - case storage: DbStorage => - new DbValidatorStore( - key, - storage, - loggerFactory, - retryProvider, - domainMigrationInfo, - participantId, - ) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new DbValidatorStore( + key, + storage, + loggerFactory, + retryProvider, + domainMigrationInfo, + participantId, + ) case class Key( /** The validator party. 
*/ diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala index 5a582cfdfa..d5f01b86f9 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/ExternalPartyWalletAutomationService.scala @@ -6,7 +6,6 @@ package org.lfdecentralizedtrust.splice.wallet.automation import org.lfdecentralizedtrust.splice.automation.{ AutomationServiceCompanion, SpliceAppAutomationService, - UpdateIngestionService, } import AutomationServiceCompanion.TriggerClass import org.lfdecentralizedtrust.splice.config.{AutomationConfig, SpliceParametersConfig} @@ -21,7 +20,6 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.time.Clock import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer -import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion.SpliceLedgerConnectionPriority import scala.concurrent.ExecutionContext diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala index d3fa963311..f47324c87d 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/automation/UserWalletAutomationService.scala @@ -10,7 +10,6 @@ import org.lfdecentralizedtrust.splice.automation.{ TransferFollowTrigger, TxLogBackfillingTrigger, UnassignTrigger, - UpdateIngestionService, } import AutomationServiceCompanion.{TriggerClass, aTrigger} import 
org.lfdecentralizedtrust.splice.config.{AutomationConfig, SpliceParametersConfig} diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/ExternalPartyWalletStore.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/ExternalPartyWalletStore.scala index 969b16efef..7af8826968 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/ExternalPartyWalletStore.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/ExternalPartyWalletStore.scala @@ -20,7 +20,7 @@ import org.lfdecentralizedtrust.splice.wallet.store.db.WalletTables.ExternalPart import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.pretty.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext import org.lfdecentralizedtrust.splice.store.db.AcsInterfaceViewRowData @@ -54,7 +54,7 @@ trait ExternalPartyWalletStore extends TransferInputStore with NamedLogging { object ExternalPartyWalletStore { def apply( key: Key, - storage: Storage, + storage: DbStorage, loggerFactory: NamedLoggerFactory, retryProvider: RetryProvider, domainMigrationInfo: DomainMigrationInfo, @@ -64,18 +64,14 @@ object ExternalPartyWalletStore { templateJsonDecoder: TemplateJsonDecoder, close: CloseContext, ): ExternalPartyWalletStore = { - storage match { - case dbStorage: DbStorage => - new DbExternalPartyWalletStore( - key, - dbStorage, - loggerFactory, - retryProvider, - domainMigrationInfo, - participantId, - ) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new DbExternalPartyWalletStore( + key, + storage, + loggerFactory, + retryProvider, + domainMigrationInfo, + participantId, + ) } case 
class Key( diff --git a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/UserWalletStore.scala b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/UserWalletStore.scala index 2a13a18512..ae54ad0fda 100644 --- a/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/UserWalletStore.scala +++ b/apps/wallet/src/main/scala/org/lfdecentralizedtrust/splice/wallet/store/UserWalletStore.scala @@ -40,7 +40,7 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.logging.pretty.* import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.resource.{DbStorage, Storage} +import com.digitalasset.canton.resource.DbStorage import com.digitalasset.canton.topology.{ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext import io.grpc.Status @@ -476,7 +476,7 @@ object UserWalletStore { def apply( key: Key, - storage: Storage, + storage: DbStorage, loggerFactory: NamedLoggerFactory, retryProvider: RetryProvider, domainMigrationInfo: DomainMigrationInfo, @@ -486,18 +486,14 @@ object UserWalletStore { templateJsonDecoder: TemplateJsonDecoder, close: CloseContext, ): UserWalletStore = { - storage match { - case dbStorage: DbStorage => - new DbUserWalletStore( - key, - dbStorage, - loggerFactory, - retryProvider, - domainMigrationInfo, - participantId, - ) - case storageType => throw new RuntimeException(s"Unsupported storage type $storageType") - } + new DbUserWalletStore( + key, + storage, + loggerFactory, + retryProvider, + domainMigrationInfo, + participantId, + ) } case class Key( From c1a2fcf91fc9a7a565f76a13f8bb30f6aef65ce9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Tue, 14 Oct 2025 13:03:14 +0000 Subject: [PATCH 05/10] [ci] Docs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz 
--- docs/src/release_notes.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/src/release_notes.rst b/docs/src/release_notes.rst index 8277005fd9..58d75b72b8 100644 --- a/docs/src/release_notes.rst +++ b/docs/src/release_notes.rst @@ -8,6 +8,15 @@ Release Notes ============= +Upcoming +-------- + + - SV + + - The SV app will no longer store the update history and such, will not be able to answer historical queries. + All updates involving the DSO party will still be stored and returned by Scan. + + 0.4.20 ------ From 2ff977b0e26a23a58569b2f6ba35498afa4e336e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Tue, 14 Oct 2025 13:13:19 +0000 Subject: [PATCH 06/10] [ci] run pls gh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz From 4a38d9f497e522acb9e2e8e1a2a14b84c166b1f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Wed, 15 Oct 2025 10:49:05 +0000 Subject: [PATCH 07/10] [ci] truncate tables for sv app MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz --- .../stable/V047__truncate_sv_app_history.sql | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql diff --git a/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql b/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql new file mode 100644 index 0000000000..c1ae692060 --- /dev/null +++ b/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql @@ -0,0 +1,26 @@ +-- if the only 2 store descriptors belong to the SV app, that means we can truncate the update_history tables + +DO $$ +DECLARE + descriptors TEXT[]; +BEGIN + +-- array equality 
(ordered) ensures that exactly these two, no more, no less, are there <=> it's the SV app +select array_agg(store_name order by store_name) into descriptors +from update_history_descriptors; + +IF descriptors = '{"DbSvDsoStore", "DbSvSvStore"}' THEN + RAISE NOTICE 'Truncating update history tables as only SV app descriptors are present. Descriptors: %', descriptors::text; + EXECUTE 'TRUNCATE TABLE update_history_assignments CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_unassignments CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_backfilling CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_creates CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_exercises CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_transactions CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_last_ingested_offsets CASCADE'; + EXECUTE 'TRUNCATE TABLE update_history_descriptors CASCADE'; +ELSE + RAISE NOTICE 'This is not the SV app, NOT truncating update history tables. Descriptors: %', descriptors::text; +END IF; + +END $$; From 2a9966193cfdc23b34a666affb767c7685376ce4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Mon, 20 Oct 2025 11:49:36 +0000 Subject: [PATCH 08/10] fix migration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz --- ..._history.sql => V048__truncate_sv_splitwell_history.sql} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename apps/common/src/main/resources/db/migration/canton-network/postgres/stable/{V047__truncate_sv_app_history.sql => V048__truncate_sv_splitwell_history.sql} (71%) diff --git a/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql b/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V048__truncate_sv_splitwell_history.sql similarity index 71% rename from apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql 
rename to apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V048__truncate_sv_splitwell_history.sql index c1ae692060..cb73f00200 100644 --- a/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V047__truncate_sv_app_history.sql +++ b/apps/common/src/main/resources/db/migration/canton-network/postgres/stable/V048__truncate_sv_splitwell_history.sql @@ -9,8 +9,8 @@ BEGIN select array_agg(store_name order by store_name) into descriptors from update_history_descriptors; -IF descriptors = '{"DbSvDsoStore", "DbSvSvStore"}' THEN - RAISE NOTICE 'Truncating update history tables as only SV app descriptors are present. Descriptors: %', descriptors::text; +IF (descriptors = '{"DbSvDsoStore", "DbSvSvStore"}' OR descriptors = '{"DbSplitwellStore"}') THEN + RAISE NOTICE 'Truncating update history tables as only SV/Splitwell app descriptors are present. Descriptors: %', descriptors::text; EXECUTE 'TRUNCATE TABLE update_history_assignments CASCADE'; EXECUTE 'TRUNCATE TABLE update_history_unassignments CASCADE'; EXECUTE 'TRUNCATE TABLE update_history_backfilling CASCADE'; @@ -20,7 +20,7 @@ IF descriptors = '{"DbSvDsoStore", "DbSvSvStore"}' THEN EXECUTE 'TRUNCATE TABLE update_history_last_ingested_offsets CASCADE'; EXECUTE 'TRUNCATE TABLE update_history_descriptors CASCADE'; ELSE - RAISE NOTICE 'This is not the SV app, NOT truncating update history tables. Descriptors: %', descriptors::text; + RAISE NOTICE 'This is not the SV or Splitwell app, NOT truncating update history tables. 
Descriptors: %', descriptors::text; END IF; END $$; From 3f938d60d85c35f6278eb49eee0384332bc576da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Mon, 20 Oct 2025 12:05:31 +0000 Subject: [PATCH 09/10] [ci] check update history of externals MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Oriol Muñoz --- .../tests/WalletTxLogIntegrationTest.scala | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala index ca849bfa65..df519e03b8 100644 --- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala +++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/WalletTxLogIntegrationTest.scala @@ -7,7 +7,7 @@ import org.lfdecentralizedtrust.splice.codegen.java.splice.wallet.subscriptions import org.lfdecentralizedtrust.splice.config.ConfigTransforms import org.lfdecentralizedtrust.splice.integration.EnvironmentDefinition import org.lfdecentralizedtrust.splice.integration.tests.SpliceTests.IntegrationTestWithSharedEnvironment -import org.lfdecentralizedtrust.splice.store.Limit +import org.lfdecentralizedtrust.splice.store.{Limit, PageLimit} import org.lfdecentralizedtrust.splice.sv.automation.delegatebased.AnsSubscriptionRenewalPaymentTrigger import org.lfdecentralizedtrust.splice.sv.config.InitialAnsConfig import org.lfdecentralizedtrust.splice.util.{ @@ -1226,12 +1226,27 @@ class WalletTxLogIntegrationTest logEntry.senderHoldingFees should beWithin(0, smallAmount) logEntry.amuletPrice shouldBe amuletPrice } + val expectedTxLogEntries = Seq(renewTxLog, creationTxLog, tapTxLog) checkTxHistory( bobValidatorWalletClient, - Seq(renewTxLog, creationTxLog, tapTxLog), + expectedTxLogEntries,
trafficTopups = IgnoreTopupsDevNet, ) + clue("Check UpdateHistory works for external parties") { + inside( + bobValidatorBackend.appState.walletManager + .valueOrFail("WalletManager is expected to be defined") + .externalPartyWalletManager + .lookupExternalPartyWallet(onboarding.party) + .valueOrFail(s"Expected ${onboarding.party} to have an external party wallet") + .updateHistory + .getAllUpdates(None, PageLimit.Max) + .futureValue + ) { history => + history.size should be >= expectedTxLogEntries.size + } + } } "handle failed automation (direct transfer)" in { implicit env => From 6163ffdab7ac5f104a82e1e4076c0b5a354e6314 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Oriol=20Mu=C3=B1oz?= Date: Mon, 20 Oct 2025 14:38:27 +0200 Subject: [PATCH 10/10] Make scanConfig mandatory in SV app (#2686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make scanConfig mandatory in SV app [ci] let's see what breaks Signed-off-by: Oriol Muñoz * [ci] docs Signed-off-by: Oriol Muñoz * [ci] run pls gh Signed-off-by: Oriol Muñoz * update helm json schema Signed-off-by: Oriol Muñoz * [ci] release notes fix Signed-off-by: Oriol Muñoz --------- Signed-off-by: Oriol Muñoz --- .../sv/admin/http/HttpSvAdminHandler.scala | 36 +++++++------------ .../automation/SvDsoAutomationService.scala | 18 +++++----- .../singlesv/SequencerPruningTrigger.scala | 32 ++++++----------- .../splice/sv/config/SvAppConfig.scala | 2 +- .../SynchronizerNodeReconciler.scala | 6 ++-- .../splice/sv/util/SvUtil.scala | 4 +-- .../helm/splice-sv-node/values.schema.json | 4 ++- docs/src/release_notes.rst | 5 +++ 8 files changed, 43 insertions(+), 64 deletions(-) diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala index 20a30979c1..d4d71560e8 100644 --- 
a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvAdminHandler.scala @@ -58,7 +58,6 @@ import com.digitalasset.canton.time.Clock import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil import io.circe.syntax.EncoderOps -import io.grpc.Status import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer import org.lfdecentralizedtrust.splice.config.{NetworkAppClientConfig, UpgradesConfig} @@ -110,29 +109,18 @@ class HttpSvAdminHandler( // Similar to PublishScanConfigTrigger, this class creates its own scan connection // on demand, because scan might not be available at application startup. - private def createScanConnection(): Future[ScanConnection] = - config.scan match { - case None => - Future.failed( - Status.UNAVAILABLE - .withDescription( - "This application is not configured to connect to a scan service. " + - " Check the application configuration or use the scan API to query votes information." 
- ) - .asRuntimeException() - ) - case Some(scanConfig) => - implicit val tc: TraceContext = TraceContext.empty - ScanConnection - .singleUncached( - ScanAppClientConfig(NetworkAppClientConfig(scanConfig.internalUrl)), - upgradesConfig, - clock, - retryProvider, - loggerFactory, - retryConnectionOnInitialFailure = true, - ) - } + private def createScanConnection(): Future[ScanConnection] = { + implicit val tc: TraceContext = TraceContext.empty + ScanConnection + .singleUncached( + ScanAppClientConfig(NetworkAppClientConfig(config.scan.internalUrl)), + upgradesConfig, + clock, + retryProvider, + loggerFactory, + retryConnectionOnInitialFailure = true, + ) + } @SuppressWarnings(Array("org.wartremover.warts.Var")) private var scanConnectionV: Option[Future[ScanConnection]] = None private def scanConnectionF: Future[ScanConnection] = blocking { diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala index 71d9f4132f..32d368fd0f 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala @@ -374,17 +374,15 @@ class SvDsoAutomationService( ) ) - config.scan.foreach { scan => - registerTrigger( - new PublishScanConfigTrigger( - triggerContext, - dsoStore, - connection(SpliceLedgerConnectionPriority.Low), - scan, - upgradesConfig, - ) + registerTrigger( + new PublishScanConfigTrigger( + triggerContext, + dsoStore, + connection(SpliceLedgerConnectionPriority.Low), + config.scan, + upgradesConfig, ) - } + ) config.followAmuletConversionRateFeed.foreach { c => registerTrigger( diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala 
b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala index 572f96f26c..0787679615 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SequencerPruningTrigger.scala @@ -41,7 +41,7 @@ import scala.concurrent.{ExecutionContextExecutor, Future} class SequencerPruningTrigger( override protected val context: TriggerContext, store: SvDsoStore, - scanConfig: Option[SvScanConfig], + scanConfig: SvScanConfig, upgradesConfig: UpgradesConfig, sequencerAdminConnection: SequencerAdminConnection, mediatorAdminConnection: MediatorAdminConnection, @@ -65,27 +65,15 @@ class SequencerPruningTrigger( private def createScanConnection()(implicit tc: TraceContext ): Future[BackfillingScanConnection] = - scanConfig match { - case None => - Future.failed( - Status.UNAVAILABLE - .withDescription( - "This application is not configured to connect to a scan service. " + - " Check the application configuration or use the scan API to query votes information." 
- ) - .asRuntimeException() - ) - case Some(scanConfig) => - ScanConnection - .singleUncached( - ScanAppClientConfig(NetworkAppClientConfig(scanConfig.internalUrl)), - upgradesConfig, - clock, - context.retryProvider, - loggerFactory, - retryConnectionOnInitialFailure = true, - ) - } + ScanConnection + .singleUncached( + ScanAppClientConfig(NetworkAppClientConfig(scanConfig.internalUrl)), + upgradesConfig, + clock, + context.retryProvider, + loggerFactory, + retryConnectionOnInitialFailure = true, + ) override def performWorkIfAvailable()(implicit traceContext: TraceContext): Future[Boolean] = for { diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala index 58b1a449cb..6235938fef 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/config/SvAppConfig.scala @@ -284,7 +284,7 @@ case class SvAppBackendConfig( initialAmuletPriceVote: Option[BigDecimal] = None, cometBftConfig: Option[SvCometBftConfig] = None, localSynchronizerNode: Option[SvSynchronizerNodeConfig], - scan: Option[SvScanConfig], + scan: SvScanConfig, participantBootstrappingDump: Option[ParticipantBootstrapDumpConfig] = None, identitiesDump: Option[BackupDumpConfig] = None, domainMigrationDumpPath: Option[Path] = None, diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/SynchronizerNodeReconciler.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/SynchronizerNodeReconciler.scala index 71d6301f24..0cf0f807db 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/SynchronizerNodeReconciler.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/onboarding/SynchronizerNodeReconciler.scala @@ -46,7 +46,7 @@ class SynchronizerNodeReconciler( synchronizerId: SynchronizerId, state: 
SynchronizerNodeState, migrationId: Long, - scan: Option[SvScanConfig], + scanConfig: SvScanConfig, )(implicit ec: ExecutionContext, tc: TraceContext, @@ -54,9 +54,7 @@ class SynchronizerNodeReconciler( def setConfigIfRequired() = for { localSequencerConfig <- SvUtil.getSequencerConfig(synchronizerNode, migrationId) localMediatorConfig <- SvUtil.getMediatorConfig(synchronizerNode) - localScanConfig = scan - .map(scanConfig => new ScanConfig(scanConfig.publicUrl.toString())) - .toJava + localScanConfig = java.util.Optional.of(new ScanConfig(scanConfig.publicUrl.toString())) rulesAndState <- dsoStore.getDsoRulesWithSvNodeState(svParty) nodeState = rulesAndState.svNodeState.payload // TODO(DACH-NY/canton-network-node#4901): do not use default, but reconcile all configured domains diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala index 90f6a6a70f..7e061c820f 100644 --- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala +++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/util/SvUtil.scala @@ -180,7 +180,7 @@ object SvUtil { def getSV1SynchronizerNodeConfig( cometBftNode: Option[CometBftNode], localSynchronizerNode: LocalSynchronizerNode, - scanConfig: Option[SvScanConfig], + scanConfig: SvScanConfig, synchronizerId: SynchronizerId, clock: Clock, migrationId: Long, @@ -234,7 +234,7 @@ object SvUtil { cometBftConfig, sequencerConfig.toJava, mediatorConfig.toJava, - scanConfig.map(c => new ScanConfig(c.publicUrl.toString())).toJava, + Optional.of(new ScanConfig(scanConfig.publicUrl.toString())), Optional.empty(), ) ).asJava diff --git a/cluster/helm/splice-sv-node/values.schema.json b/cluster/helm/splice-sv-node/values.schema.json index 723c72030d..96521950d6 100644 --- a/cluster/helm/splice-sv-node/values.schema.json +++ b/cluster/helm/splice-sv-node/values.schema.json @@ -43,6 +43,7 @@ "defaultJvmOptions", "domain", 
"imageRepo", + "scan", "nodeIdentifier", "onboardingName", "persistence", @@ -287,7 +288,8 @@ "internalUrl": { "type": "string" } - } + }, + "required": ["publicUrl", "internalUrl"] }, "nodeIdentifier": { "type": "string" diff --git a/docs/src/release_notes.rst b/docs/src/release_notes.rst index 75e96e2941..88dec1f2f9 100644 --- a/docs/src/release_notes.rst +++ b/docs/src/release_notes.rst @@ -16,6 +16,11 @@ Upcoming - The SV app will no longer store the update history and such, will not be able to answer historical queries. All updates involving the DSO party will still be stored and returned by Scan. + - Deployment + + - The helm values under ``scan``, that is, ``publicUrl`` and ``internalUrl``, are now mandatory. + All SVs already deploy scan on DevNet, TestNet and MainNet, so this should have no impact. + 0.4.21 ------