From 3fee3807ffb7468403dc9197aaec73534da7b0c1 Mon Sep 17 00:00:00 2001
From: canton-machine <48923836+canton-machine@users.noreply.github.com>
Date: Wed, 20 Mar 2024 07:32:49 +0100
Subject: [PATCH] Update 2024-03-19.23 (#146)

Reference commit: 6ad9b204b1

Co-authored-by: Canton
---
 .../domain/v30/sequencer_connection.proto | 11 +
 .../participant/v30/domain_connectivity.proto | 12 +-
 ...rpriseMediatorAdministrationCommands.scala | 6 +-
 .../EnterpriseSequencerAdminCommands.scala | 149 ++-
 ...riseSequencerConnectionAdminCommands.scala | 15 +-
 .../commands/ParticipantAdminCommands.scala | 22 +-
 .../commands/SequencerAdminCommands.scala | 2 +-
 .../api/client/data/DomainParameters.scala | 12 +-
 .../data/TrafficControlParameters.scala | 2 +
 .../canton/config/CantonConfig.scala | 15 +-
 .../console/ConsoleEnvironmentBinding.scala | 1 +
 .../canton/console/ConsoleMacros.scala | 28 +-
 .../canton/console/InstanceReference.scala | 10 +-
 .../ParticipantReferencesExtensions.scala | 9 +-
 .../commands/HealthAdministration.scala | 4 +-
 .../commands/LedgerApiAdministration.scala | 4 +-
 .../MediatorAdministrationGroup.scala | 7 +-
 .../commands/ParticipantAdministration.scala | 94 +-
 .../ParticipantRepairAdministration.scala | 7 +-
 .../SequencerConnectionAdministration.scala | 26 +-
 .../SequencerNodeAdministration.scala | 90 +-
 .../commands/TopologyAdministrationX.scala | 257 ++--
 .../environment/CommunityEnvironment.scala | 2 +-
 .../canton/environment/Environment.scala | 5 +-
 .../canton/environment/Nodes.scala | 6 +-
 .../canton/metrics/CsvReporter.scala | 150 +--
 .../metrics/FilteringMetricsReader.scala | 50 +
 .../canton/metrics/MetricsRegistry.scala | 132 +-
 .../pack/config/monitoring/prometheus.conf | 2 +-
 .../src/pack/config/monitoring/tracing.conf | 4 +-
 .../examples/01-simple-topology/README.md | 2 +-
 .../simple-topology-x-2.conf | 13 -
 ...e-topology-x.conf => simple-topology.conf} | 0
 .../composability-auto-transfer.canton | 4 +-
 .../05-composability/composability1.canton | 4 +-
 .../05-composability/composability2.canton | 4 +-
 .../config/CantonCommunityConfigTest.scala | 4 +-
 ...iseFeatureInCommunityIntegrationTest.scala | 2 +-
 .../tests/ExampleIntegrationTest.scala | 2 +-
 ...implestPingXCommunityIntegrationTest.scala | 2 +-
 .../tests/release/CliIntegrationTest.scala | 4 +-
 .../metrics/LabeledMetricsFactoryTest.scala | 1 -
 .../protocol/v30/domain_parameters.proto | 1 +
 .../canton/protocol/v30/sequencing.proto | 2 -
 .../canton/protocol/v30/topology.proto | 6 +-
 .../v30/traffic_control_parameters.proto | 4 +-
 .../canton/ProtoDeserializationError.scala | 1 -
 .../canton/config/ProcessingTimeouts.scala | 2 -
 .../canton/config/ServerConfig.scala | 3 -
 .../canton/crypto/CryptoApi.scala | 5 +-
 .../canton/crypto/Encryption.scala | 2 +-
 .../canton/data/RepairContract.scala | 8 +-
 .../canton/data/TransferInViewTree.scala | 21 +-
 .../canton/data/TransferOutViewTree.scala | 6 +-
 .../canton/health/ComponentHealthState.scala | 4 +
 .../canton/health/HealthComponent.scala | 2 +
 .../lifecycle/FutureUnlessShutdown.scala | 7 +-
 .../canton/metrics/MetricValue.scala | 14 +-
 .../metrics/SequencerClientMetrics.scala | 4 +-
 .../canton/networking/Endpoint.scala | 4 +
 .../networking/grpc/CantonGrpcUtil.scala | 4 +-
 .../networking/grpc/CantonServerBuilder.scala | 7 -
 .../com/digitalasset/canton/package.scala | 4 -
 .../canton/protocol/DomainParameters.scala | 41 +-
 .../protocol/HasSerializableContract.scala | 4 +-
 .../canton/protocol/package.scala | 2 -
 .../sequencing/SequencerAggregator.scala | 1 +
.../sequencing/SequencerConnection.scala | 3 +- .../sequencing/SequencerConnections.scala | 34 + .../sequencing/TrafficControlParameters.scala | 6 + .../grpc/AuthenticationTokenManager.scala | 65 +- .../client/SequencerClientFactory.scala | 23 +- .../SequencerClientTransportFactory.scala | 132 +- .../client/SequencerTransportState.scala | 2 + .../sequencing/protocol/TrafficState.scala | 10 +- .../memory/InMemorySequencedEventStore.scala | 22 +- .../digitalasset/canton/topology/Member.scala | 3 + ...pologyStateForInititalizationService.scala | 9 +- .../topology/store/TopologyStoreX.scala | 18 +- .../TopologyTransactionCollectionX.scala | 10 + .../topology/store/db/DbTopologyStoreX.scala | 48 +- .../store/memory/InMemoryTopologyStoreX.scala | 54 +- .../transaction/TopologyTransactionX.scala | 6 +- .../TrafficBalanceSubmissionHandler.scala | 5 + .../canton/util/IterableUtil.scala | 27 + .../digitalasset/canton/util/LoggerUtil.scala | 8 +- .../digitalasset/canton/util/SeqUtil.scala | 15 - .../javaapi/data/GetPackageResponse.java | 2 +- .../data/GetPackageStatusResponse.java | 4 +- .../javaapi/data/codegen/ValueDecoder.java | 21 - .../src/main/daml/CantonExamples/daml.yaml | 2 +- .../canton/h2/stable/V1_1__initial.sql | 11 +- .../canton/postgres/stable/V1_1__initial.sql | 11 +- .../domain/grpc/SequencerInfoLoader.scala | 353 +++-- .../provider/jce/JcePrivateCrypto.scala | 1 + .../provider/tink/TinkPrivateCrypto.scala | 2 + .../MediatorGroupDeltaComputations.scala | 81 ++ .../com/digitalasset/canton/Generators.scala | 4 +- .../domain/grpc/SequencerInfoLoaderTest.scala | 191 +++ .../canton/data/GeneratorsTransferData.scala | 4 +- .../canton/protocol/GeneratorsProtocol.scala | 5 +- .../MediatorGroupDeltaComputationsTest.scala | 236 ++++ .../topology/store/TopologyStoreXTest.scala | 46 +- .../canton/util/IterableUtilTest.scala | 48 + .../canton/util/SeqUtilTest.scala | 40 - .../demo/src/main/daml/ai-analysis/daml.yaml | 2 +- community/demo/src/main/daml/bank/daml.yaml | 2 +- community/demo/src/main/daml/doctor/daml.yaml | 2 +- .../src/main/daml/health-insurance/daml.yaml | 2 +- .../src/main/daml/medical-records/daml.yaml | 2 +- .../canton/demo/ReferenceDemoScript.scala | 10 +- community/domain/src/main/protobuf/buf.yaml | 17 +- .../v30/mediator_administration_service.proto | 2 +- .../v30/mediator_initialization_service.proto | 5 +- .../v30/sequencer_connection_service.proto | 5 +- .../canton/mediator/scalapb/package.proto | 14 + .../sequencer_administration_service.proto | 64 +- .../sequencer_initialization_service.proto | 27 +- .../sequencer_initialization_snapshot.proto | 2 +- ...encer_pruning_administration_service.proto | 2 +- .../admin/v30/sequencer_version_service.proto | 2 +- .../canton/sequencer/scalapb/package.proto | 14 + .../block/BlockSequencerStateManager.scala | 347 ++--- .../{BlockUpdates.scala => BlockUpdate.scala} | 45 +- .../domain/block/BlockUpdateGenerator.scala | 510 +++---- .../domain/block/LedgerBlockEvent.scala | 2 - .../block/data/BlockEphemeralState.scala | 16 +- .../data/BlockUpdateEphemeralState.scala | 59 + .../domain/block/data/EphemeralState.scala | 72 +- .../block/data/SequencerBlockStore.scala | 21 +- .../block/data/db/DbSequencerBlockStore.scala | 26 +- .../memory/InMemorySequencerBlockStore.scala | 10 +- .../config/DomainParametersConfig.scala | 6 +- .../canton/domain/mediator/Mediator.scala | 4 +- .../canton/domain/mediator/MediatorNode.scala | 597 ++++++++- .../domain/mediator/MediatorNodeCommon.scala | 303 ----- .../domain/mediator/MediatorNodeX.scala | 364 
----- .../mediator/MediatorReplicaManager.scala | 8 +- .../mediator/MediatorRuntimeFactory.scala | 38 +- .../gprc/InitializeMediatorRequest.scala | 12 +- .../gprc/InitializeMediatorResponse.scala | 2 +- .../GrpcMediatorInitializationServiceX.scala | 10 +- .../sequencing/SequencerNodeCommon.scala | 8 +- .../domain/sequencing/SequencerNodeX.scala | 30 +- .../domain/sequencing/SequencerRuntime.scala | 2 +- .../SequencerRuntimeForSeparateNode.scala | 19 +- .../grpc/InitializeSequencerRequest.scala | 97 +- .../grpc/InitializeSequencerResponse.scala | 17 +- .../MemberAuthenticationStore.scala | 59 +- .../OnboardingStateForSequencer.scala | 82 ++ .../sequencer/SequencerPruningStatus.scala | 95 +- .../sequencer/SequencerSnapshot.scala | 6 +- .../sequencer/block/BlockSequencer.scala | 146 ++- .../block/BlockSequencerFactory.scala | 28 +- .../traffic/MemberTrafficSnapshot.scala | 2 +- .../GrpcSequencerAdministrationService.scala | 187 ++- .../GrpcSequencerInitializationServiceX.scala | 110 +- ...equencerPruningAdministrationService.scala | 2 +- .../service/GrpcSequencerVersionService.scala | 4 +- .../sequencing/traffic/TrafficBalance.scala | 2 +- .../GrpcSequencerConnectionService.scala | 77 +- .../state/EphemeralStateTest.scala | 52 - .../SequencerStateManagerStoreTest.scala | 28 +- .../sequencer/block/BlockSequencerTest.scala | 55 +- ...ommunityReferenceBlockOrdererFactory.scala | 7 +- .../reference/ReferenceBlockOrderer.scala | 43 +- .../store/DbReferenceBlockOrderingStore.scala | 6 +- .../store/ReferenceBlockOrderingStore.scala | 8 +- .../ReferenceBlockOrderingStoreTest.scala | 10 +- .../CommunityEnvironmentDefinition.scala | 3 - .../main/scala/com/daml/http/perf/Main.scala | 3 +- .../failurelib/scala/http/FailureTests.scala | 6 +- .../scala/http/HttpTestFixture.scala | 1 - .../scala/http/ToxicSandboxFixture.scala | 3 - .../ledger/api/auth/AuthorizationError.scala | 5 - .../canton/ledger/api/auth/Authorizer.scala | 2 - .../canton/ledger/api/auth/Claims.scala | 11 +- .../AuthorizationInterceptor.scala | 1 - .../completion/CompletionStreamRequest.scala | 3 +- .../CompletionServiceRequestValidator.scala | 1 - .../api/validation/FieldValidator.scala | 23 +- .../common/PureConfigReaderWriter.scala | 31 +- .../canton/platform/DispatcherState.scala | 39 +- .../platform/apiserver/ApiServiceOwner.scala | 9 +- .../apiserver/services/logging/package.scala | 4 - .../platform/index/IndexServiceImpl.scala | 2 - .../platform/index/IndexServiceOwner.scala | 3 - .../platform/indexer/ha/HaCoordinator.scala | 27 +- .../platform/indexer/ha/PollingChecker.scala | 4 +- .../indexer/ha/PreemptableSequence.scala | 19 +- .../platform/store/cache/StateCache.scala | 1 + .../platform/store/dao/JdbcLedgerDao.scala | 5 +- .../canton/platform/store/dao/LedgerDao.scala | 6 +- .../store/utils/ConcurrencyLimiter.scala | 15 +- .../ledger/api/auth/AuthorizerSpec.scala | 1 - .../StreamAuthorizationComponentSpec.scala | 3 - ...ompletionServiceRequestValidatorTest.scala | 5 +- .../SubmitRequestValidatorTest.scala | 9 - .../runner/common/ArbitraryConfig.scala | 20 - .../common/PureConfigReaderWriterSpec.scala | 47 +- .../canton/platform/IndexComponentTest.scala | 3 - .../command/CommandServiceImplSpec.scala | 2 - .../validation/ErrorFactoriesSpec.scala | 22 - .../RecoveringIndexerIntegrationSpec.scala | 12 +- .../indexer/ha/EndlessReadService.scala | 2 - .../store/dao/JdbcLedgerDaoBackend.scala | 12 +- .../store/testing/oracle/OracleAround.scala | 2 +- .../testing/postgresql/PostgresAround.scala | 2 +- 
.../ledger/indexerbenchmark/Config.scala | 9 - .../indexerbenchmark/IndexerBenchmark.scala | 36 +- .../src/main/daml/carbonv1/daml.yaml | 2 +- .../src/main/daml/carbonv2/daml.yaml | 2 +- .../src/main/daml/experimental/daml.yaml | 2 +- .../src/main/daml/model/daml.yaml | 2 +- .../main/daml/package_management/daml.yaml | 2 +- .../src/main/daml/semantic/daml.yaml | 2 +- .../src/main/daml/upgrade/1.0.0/daml.yaml | 2 +- .../src/main/daml/upgrade/2.0.0/daml.yaml | 2 +- .../src/main/daml/upgrade/3.0.0/daml.yaml | 2 +- .../canton/ledger/api/domain/package.scala | 7 - .../ledger/api/refinements/ApiTypes.scala | 4 - .../ledger/api/tls/DecryptionParameters.scala | 107 -- .../canton/ledger/api/tls/SecretsUrl.scala | 25 - .../ledger/api/tls/TlsConfiguration.scala | 19 +- .../canton/ledger/configuration/package.scala | 7 +- .../groups/RequestValidationErrors.scala | 19 - .../canton/metrics/IndexDBMetrics.scala | 1 - .../canton/metrics/ServicesMetrics.scala | 15 +- .../test-models/daml-lf/encoder/test-2.1.dar | Bin 4999 -> 4792 bytes .../daml-lf/encoder/test-2.dev.dar | Bin 5751 -> 5759 bytes .../api/tls/DecryptionParametersTest.scala | 131 -- .../ledger/api/tls/SecretsUrlTest.scala | 61 - .../ledger/api/tls/TlsConfigurationTest.scala | 39 - .../subpackage/MildErrorsParent.scala | 6 +- .../util/IdentifierConverters.scala | 2 +- .../com/digitalasset/canton/http/domain.scala | 4 - .../http/endpoints/CreateAndExercise.scala | 2 +- .../canton/http/endpoints/RouteSetup.scala | 2 +- .../canton/http/json/DomainJsonDecoder.scala | 2 +- .../canton/http/json/DomainJsonEncoder.scala | 2 +- .../canton/http/json/JsonProtocol.scala | 2 - .../canton/http/util/ClientUtil.scala | 2 +- .../canton/http/util/Commands.scala | 1 - .../http/util/IdentifierConverters.scala | 2 +- .../pureconfigutils/SharedConfigReaders.scala | 71 +- .../src/test/daml/v2_1/daml.yaml | 2 +- .../src/test/daml/v2_dev/daml.yaml | 2 +- .../canton/http/PackageServiceTest.scala | 2 +- .../digitalasset/canton/RequireBlocking.scala | 6 +- .../canton/SynchronizedFuture.scala | 99 ++ .../canton/RequireBlockingTest.scala | 4 +- .../canton/SynchronizedFutureTest.scala | 105 ++ community/participant/src/main/daml/daml.yaml | 2 +- .../participant/ParticipantNodeCommon.scala | 33 +- .../canton/participant/ParticipantNodeX.scala | 12 +- .../admin/DomainConnectivityService.scala | 228 ---- .../admin/data/ActiveContract.scala | 16 +- .../grpc/GrpcDomainConnectivityService.scala | 259 +++- .../admin/inspection/AcsInspection.scala | 16 +- .../inspection/SyncStateInspection.scala | 6 +- .../admin/repair/ChangeAssignation.scala | 55 +- .../admin/repair/EnsureValidContractIds.scala | 67 +- .../admin/repair/RepairService.scala | 94 +- .../config/LocalParticipantConfig.scala | 5 +- .../participant/domain/DomainRegistry.scala | 2 + .../domain/grpc/GrpcDomainRegistry.scala | 8 +- .../participant/event/AcsChangeListener.scala | 144 +- .../event/RecordOrderPublisher.scala | 15 +- .../api/CantonLedgerApiServerWrapper.scala | 3 - .../StartableStoppableLedgerApiServer.scala | 7 +- .../ledger/api/client/LedgerConnection.scala | 4 +- .../metrics/ParticipantMetrics.scala | 6 +- .../participant/metrics/PruningMetrics.scala | 24 +- .../protocol/Phase37Synchronizer.scala | 1 + .../protocol/RepairProcessor.scala | 2 +- .../protocol/TransactionProcessingSteps.scala | 4 +- .../conflictdetection/CommitSet.scala | 19 +- .../conflictdetection/ConflictDetector.scala | 34 +- .../conflictdetection/LockableStatus.scala | 4 +- .../protocol/transfer/TransferData.scala | 4 +- 
.../transfer/TransferInProcessingSteps.scala | 13 +- .../transfer/TransferInValidation.scala | 6 +- .../transfer/TransferOutProcessingSteps.scala | 22 +- .../transfer/TransferOutRequest.scala | 6 +- .../TransactionValidationResult.scala | 5 +- .../pruning/AcsCommitmentProcessor.scala | 784 +++++++---- ...vationsDeactivationsConsistencyCheck.scala | 157 +++ .../store/ActiveContractStore.scala | 405 ++++-- .../store/SyncDomainPersistentState.scala | 9 +- .../store/data/ActiveContractsData.scala | 15 +- .../store/db/DbActiveContractStore.scala | 912 ++++++------- .../db/DbSyncDomainPersistentState.scala | 2 +- .../memory/InMemoryAcsCommitmentStore.scala | 3 +- .../memory/InMemoryActiveContractStore.scala | 461 ++++--- .../InMemoryRegisteredDomainsStore.scala | 43 +- .../memory/InMemoryRequestJournalStore.scala | 16 +- .../InMemorySyncDomainPersistentState.scala | 8 +- .../participant/sync/CantonSyncService.scala | 102 +- .../canton/participant/sync/SyncDomain.scala | 7 +- .../admin/data/GeneratorsData.scala | 2 +- .../admin/inspection/AcsInspectionTest.scala | 2 +- .../protocol/ProtocolProcessorTest.scala | 3 +- .../ConflictDetectionHelpers.scala | 18 +- .../ConflictDetectorTest.scala | 45 +- .../RequestTrackerTest.scala | 13 +- .../TransferInProcessingStepsTest.scala | 15 +- .../transfer/TransferInValidationTest.scala | 7 +- .../TransferOutProcessingStepsTest.scala | 15 +- .../transfer/TransferOutValidationTest.scala | 5 +- .../pruning/AcsCommitmentProcessorTest.scala | 1167 +++++++++++++++-- .../store/ActiveContractStoreTest.scala | 399 ++++-- .../canton/participant/store/HookedAcs.scala | 73 +- .../participant/store/ThrowingAcs.scala | 39 +- .../participant/store/TransferStoreTest.scala | 3 +- .../store/db/DbActiveContractStoreTest.scala | 12 +- .../ActiveContractStoreTestInMemory.scala | 12 +- .../LedgerServerPartyNotifierTest.scala | 2 + .../com/digitalasset/canton/BaseTest.scala | 1 - .../symbolic/SymbolicPrivateCrypto.scala | 2 + .../canton/ledger/api/MockMessages.scala | 1 - .../canton/store/db/DbStorageSetup.scala | 2 +- .../metrics/OnDemandMetricsReader.scala | 21 +- .../telemetry/OpenTelemetryFactory.scala | 83 +- .../canton/tracing/TracerProvider.scala | 5 +- .../canton/tracing/TracingConfig.scala | 1 - .../scala/com/daml/error/ErrorResource.scala | 2 - dependencies.json | 16 +- project/BuildCommon.scala | 18 +- project/Dependencies.scala | 14 +- project/project/DamlVersions.scala | 2 +- 330 files changed, 8286 insertions(+), 5654 deletions(-) create mode 100644 community/app-base/src/main/scala/com/digitalasset/canton/metrics/FilteringMetricsReader.scala delete mode 100644 community/app/src/pack/examples/01-simple-topology/simple-topology-x-2.conf rename community/app/src/pack/examples/01-simple-topology/{simple-topology-x.conf => simple-topology.conf} (100%) create mode 100644 community/common/src/main/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputations.scala create mode 100644 community/common/src/test/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoaderTest.scala create mode 100644 community/common/src/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => mediator}/admin/v30/mediator_administration_service.proto (97%) rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => mediator}/admin/v30/mediator_initialization_service.proto (83%) rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => 
mediator}/admin/v30/sequencer_connection_service.proto (81%) create mode 100644 community/domain/src/main/protobuf/com/digitalasset/canton/mediator/scalapb/package.proto rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => sequencer}/admin/v30/sequencer_administration_service.proto (58%) rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => sequencer}/admin/v30/sequencer_initialization_service.proto (63%) rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => sequencer}/admin/v30/sequencer_initialization_snapshot.proto (98%) rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => sequencer}/admin/v30/sequencer_pruning_administration_service.proto (97%) rename community/domain/src/main/protobuf/com/digitalasset/canton/{domain => sequencer}/admin/v30/sequencer_version_service.proto (94%) create mode 100644 community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/scalapb/package.proto rename community/domain/src/main/scala/com/digitalasset/canton/domain/block/{BlockUpdates.scala => BlockUpdate.scala} (72%) create mode 100644 community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockUpdateEphemeralState.scala delete mode 100644 community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeCommon.scala delete mode 100644 community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeX.scala create mode 100644 community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/OnboardingStateForSequencer.scala delete mode 100644 community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/EphemeralStateTest.scala delete mode 100644 community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParameters.scala delete mode 100644 community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrl.scala delete mode 100644 community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParametersTest.scala delete mode 100644 community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrlTest.scala create mode 100644 community/lib/wartremover/src/main/scala/com/digitalasset/canton/SynchronizedFuture.scala create mode 100644 community/lib/wartremover/src/test/scala/com/digitalasset/canton/SynchronizedFutureTest.scala delete mode 100644 community/participant/src/main/scala/com/digitalasset/canton/participant/admin/DomainConnectivityService.scala create mode 100644 community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActivationsDeactivationsConsistencyCheck.scala diff --git a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/domain/v30/sequencer_connection.proto b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/domain/v30/sequencer_connection.proto index 405781d65..24d0f437f 100644 --- a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/domain/v30/sequencer_connection.proto +++ b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/domain/v30/sequencer_connection.proto @@ -24,6 +24,17 @@ message SequencerConnection { } } +enum SequencerConnectionValidation { + UNKNOWN = 0; + // Do not validate the sequencer connection + DISABLED = 1; + // Validate only the ones we could reach + ACTIVE = 2; + // Validate all the connections + ALL = 3; + +} + message SequencerConnections { 
option (scalapb.message).companion_extends = "com.digitalasset.canton.version.StorageProtoVersion"; diff --git a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/domain_connectivity.proto b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/domain_connectivity.proto index 2540bf3dc..57a5686a8 100644 --- a/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/domain_connectivity.proto +++ b/community/admin-api/src/main/protobuf/com/digitalasset/canton/admin/participant/v30/domain_connectivity.proto @@ -66,15 +66,25 @@ message ReconnectDomainsResponse {} message RegisterDomainRequest { DomainConnectionConfig add = 1; - // If true, only performs the handshake but does not connect to the domain + // If true, only performs the handshake but does not establish an active connection to the domain // Note that in that case domain connection will need to be performed subsequently + // This will invoke the "handshake" with the sequencer_connect_service and persist the static + // domain parameters for the given domain connection. + // This is useful during major version upgrades where we need to locally initialize the domain + // connection, but don't want to start processing contracts before we have imported the ACS. bool handshake_only = 2; + + // Determines how well the provided sequencer connections should be validated before they are + // persisted. The more paranoid the validation, the higher the chance of the command failing, + // as it will require the sequencer to be online and responsive. + com.digitalasset.canton.admin.domain.v30.SequencerConnectionValidation sequencer_connection_validation = 3; } message RegisterDomainResponse {} message ModifyDomainRequest { DomainConnectionConfig modify = 1; + com.digitalasset.canton.admin.domain.v30.SequencerConnectionValidation sequencer_connection_validation = 2; } message ModifyDomainResponse {} diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala index 33b3257e9..a7d7350f3 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseMediatorAdministrationCommands.scala @@ -12,13 +12,13 @@ import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ import com.digitalasset.canton.admin.pruning.v30.LocatePruningTimestamp import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30 import com.digitalasset.canton.domain.mediator.admin.gprc.{ InitializeMediatorRequestX, InitializeMediatorResponseX, } +import com.digitalasset.canton.mediator.admin.v30 import com.digitalasset.canton.protocol.StaticDomainParameters -import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections} import com.digitalasset.canton.topology.DomainId import io.grpc.ManagedChannel @@ -47,6 +47,7 @@ object EnterpriseMediatorAdministrationCommands { domainId: DomainId, domainParameters: StaticDomainParameters, sequencerConnections: SequencerConnections, + validation: 
SequencerConnectionValidation, ) extends BaseMediatorXInitializationCommand[ v30.InitializeMediatorRequest, v30.InitializeMediatorResponse, @@ -58,6 +59,7 @@ object EnterpriseMediatorAdministrationCommands { domainId, domainParameters, sequencerConnections, + validation, ).toProtoV30 ) diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala index ab3dc8034..9f3eca927 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerAdminCommands.scala @@ -12,14 +12,10 @@ import com.digitalasset.canton.admin.api.client.commands.GrpcAdminCommand.{ import com.digitalasset.canton.admin.pruning.v30.LocatePruningTimestamp import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30 -import com.digitalasset.canton.domain.sequencing.admin.grpc.{ - InitializeSequencerRequestX, - InitializeSequencerResponseX, -} +import com.digitalasset.canton.domain.sequencing.admin.grpc.InitializeSequencerResponse import com.digitalasset.canton.domain.sequencing.sequencer.SequencerSnapshot -import com.digitalasset.canton.topology.Member -import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX +import com.digitalasset.canton.sequencer.admin.v30 +import com.digitalasset.canton.topology.{Member, SequencerId} import com.google.protobuf.ByteString import io.grpc.ManagedChannel @@ -47,14 +43,11 @@ object EnterpriseSequencerAdminCommands { v30.SequencerPruningAdministrationServiceGrpc.stub(channel) } - final case class InitializeX( - topologySnapshot: GenericStoredTopologyTransactionsX, - domainParameters: com.digitalasset.canton.protocol.StaticDomainParameters, - sequencerSnapshot: Option[SequencerSnapshot], - ) extends GrpcAdminCommand[ - v30.InitializeSequencerRequest, - v30.InitializeSequencerResponse, - InitializeSequencerResponseX, + final case class InitializeFromOnboardingState(onboardingState: ByteString) + extends GrpcAdminCommand[ + v30.InitializeSequencerFromOnboardingStateRequest, + v30.InitializeSequencerFromOnboardingStateResponse, + InitializeSequencerResponse, ] { override type Svc = v30.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub @@ -65,32 +58,28 @@ object EnterpriseSequencerAdminCommands { override def submitRequest( service: v30.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub, - request: v30.InitializeSequencerRequest, - ): Future[v30.InitializeSequencerResponse] = - service.initializeSequencer(request) + request: v30.InitializeSequencerFromOnboardingStateRequest, + ): Future[v30.InitializeSequencerFromOnboardingStateResponse] = + service.initializeSequencerFromOnboardingState(request) - override def createRequest(): Either[String, v30.InitializeSequencerRequest] = + override def createRequest() + : Either[String, v30.InitializeSequencerFromOnboardingStateRequest] = Right( - InitializeSequencerRequestX( - topologySnapshot, - domainParameters, - sequencerSnapshot, - ).toProtoV30 + v30.InitializeSequencerFromOnboardingStateRequest(onboardingState) ) override def handleResponse( - response: v30.InitializeSequencerResponse - ): 
Either[String, InitializeSequencerResponseX] = - InitializeSequencerResponseX.fromProtoV30(response).leftMap(_.toString) + response: v30.InitializeSequencerFromOnboardingStateResponse + ): Either[String, InitializeSequencerResponse] = + Right(InitializeSequencerResponse(response.replicated)) } - final case class Initialize( + final case class InitializeFromGenesisState( topologySnapshot: ByteString, domainParameters: com.digitalasset.canton.protocol.StaticDomainParameters, - sequencerSnapshot: ByteString, ) extends GrpcAdminCommand[ - v30.InitializeSequencerVersionedRequest, - v30.InitializeSequencerVersionedResponse, - InitializeSequencerResponseX, + v30.InitializeSequencerFromGenesisStateRequest, + v30.InitializeSequencerFromGenesisStateResponse, + InitializeSequencerResponse, ] { override type Svc = v30.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub @@ -101,23 +90,22 @@ object EnterpriseSequencerAdminCommands { override def submitRequest( service: v30.SequencerInitializationServiceGrpc.SequencerInitializationServiceStub, - request: v30.InitializeSequencerVersionedRequest, - ): Future[v30.InitializeSequencerVersionedResponse] = - service.initializeSequencerVersioned(request) + request: v30.InitializeSequencerFromGenesisStateRequest, + ): Future[v30.InitializeSequencerFromGenesisStateResponse] = + service.initializeSequencerFromGenesisState(request) - override def createRequest(): Either[String, v30.InitializeSequencerVersionedRequest] = + override def createRequest(): Either[String, v30.InitializeSequencerFromGenesisStateRequest] = Right( - v30.InitializeSequencerVersionedRequest( + v30.InitializeSequencerFromGenesisStateRequest( topologySnapshot = topologySnapshot, Some(domainParameters.toProtoV30), - sequencerSnapshot, ) ) override def handleResponse( - response: v30.InitializeSequencerVersionedResponse - ): Either[String, InitializeSequencerResponseX] = - Right(InitializeSequencerResponseX(response.replicated)) + response: v30.InitializeSequencerFromGenesisStateResponse + ): Either[String, InitializeSequencerResponse] = + Right(InitializeSequencerResponse(response.replicated)) } final case class Snapshot(timestamp: CantonTimestamp) @@ -155,6 +143,85 @@ object EnterpriseSequencerAdminCommands { override def timeoutType: TimeoutType = DefaultUnboundedTimeout } + final case class OnboardingState(memberOrTimestamp: Either[SequencerId, CantonTimestamp]) + extends BaseSequencerAdministrationCommand[ + v30.OnboardingStateRequest, + v30.OnboardingStateResponse, + ByteString, + ] { + override def createRequest(): Either[String, v30.OnboardingStateRequest] = { + Right( + v30.OnboardingStateRequest(request = + memberOrTimestamp.fold[v30.OnboardingStateRequest.Request]( + member => v30.OnboardingStateRequest.Request.SequencerId(member.toProtoPrimitive), + timestamp => v30.OnboardingStateRequest.Request.Timestamp(timestamp.toProtoTimestamp), + ) + ) + ) + } + + override def submitRequest( + service: v30.SequencerAdministrationServiceGrpc.SequencerAdministrationServiceStub, + request: v30.OnboardingStateRequest, + ): Future[v30.OnboardingStateResponse] = service.onboardingState(request) + + override def handleResponse( + response: v30.OnboardingStateResponse + ): Either[String, ByteString] = + response.value match { + case v30.OnboardingStateResponse.Value + .Failure(v30.OnboardingStateResponse.Failure(reason)) => + Left(reason) + case v30.OnboardingStateResponse.Value + .Success( + v30.OnboardingStateResponse.Success(onboardingState) + ) => + Right(onboardingState) + case _ 
=> Left("response is empty") + } + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + + final case class GenesisState( + timestamp: Option[CantonTimestamp] + ) extends BaseSequencerAdministrationCommand[ + v30.GenesisStateRequest, + v30.GenesisStateResponse, + ByteString, + ] { + override def createRequest(): Either[String, v30.GenesisStateRequest] = + Right( + v30.GenesisStateRequest( + timestamp = timestamp.map(_.toProtoTimestamp) + ) + ) + + override def submitRequest( + service: v30.SequencerAdministrationServiceGrpc.SequencerAdministrationServiceStub, + request: v30.GenesisStateRequest, + ): Future[v30.GenesisStateResponse] = service.genesisState(request) + + override def handleResponse( + response: v30.GenesisStateResponse + ): Either[String, ByteString] = + response.value match { + case v30.GenesisStateResponse.Value + .Failure(v30.GenesisStateResponse.Failure(reason)) => + Left(reason) + case v30.GenesisStateResponse.Value + .Success( + v30.GenesisStateResponse.Success(genesisState) + ) => + Right(genesisState) + case _ => Left("response is empty") + } + + // command will potentially take a long time + override def timeoutType: TimeoutType = DefaultUnboundedTimeout + } + final case class Prune(timestamp: CantonTimestamp) extends BaseSequencerPruningAdministrationCommand[ v30.SequencerPruning.PruneRequest, diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerConnectionAdminCommands.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerConnectionAdminCommands.scala index 6151a23fa..26ef9ff13 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerConnectionAdminCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/EnterpriseSequencerConnectionAdminCommands.scala @@ -5,8 +5,8 @@ package com.digitalasset.canton.admin.api.client.commands import cats.implicits.toTraverseOps import cats.syntax.either.* -import com.digitalasset.canton.domain.admin.v30 -import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.mediator.admin.v30 +import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections} import io.grpc.ManagedChannel import scala.concurrent.Future @@ -47,8 +47,10 @@ object EnterpriseSequencerConnectionAdminCommands { } } - final case class SetConnection(connections: SequencerConnections) - extends BaseSequencerConnectionAdminCommand[ + final case class SetConnection( + connections: SequencerConnections, + validation: SequencerConnectionValidation, + ) extends BaseSequencerConnectionAdminCommand[ v30.SetConnectionRequest, v30.SetConnectionResponse, Unit, @@ -59,7 +61,10 @@ object EnterpriseSequencerConnectionAdminCommands { ): Future[v30.SetConnectionResponse] = service.setConnection(request) override def createRequest(): Either[String, v30.SetConnectionRequest] = Right( - v30.SetConnectionRequest(Some(connections.toProtoV30)) + v30.SetConnectionRequest( + Some(connections.toProtoV30), + sequencerConnectionValidation = validation.toProtoV30, + ) ) override def handleResponse(response: v30.SetConnectionResponse): Either[String, Unit] = diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala 
b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala index 7af8552da..092edcba5 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/ParticipantAdminCommands.scala @@ -40,6 +40,7 @@ import com.digitalasset.canton.participant.admin.grpc.{ import com.digitalasset.canton.participant.domain.DomainConnectionConfig as CDomainConnectionConfig import com.digitalasset.canton.participant.sync.UpstreamOffsetConvert import com.digitalasset.canton.protocol.LfContractId +import com.digitalasset.canton.sequencing.SequencerConnectionValidation import com.digitalasset.canton.serialization.ProtoConverter.InstantConverter import com.digitalasset.canton.topology.{DomainId, PartyId} import com.digitalasset.canton.tracing.TraceContext @@ -736,14 +737,18 @@ object ParticipantAdminCommands { } } - final case class RegisterDomain(config: CDomainConnectionConfig, handshakeOnly: Boolean) - extends Base[RegisterDomainRequest, RegisterDomainResponse, Unit] { + final case class RegisterDomain( + config: CDomainConnectionConfig, + handshakeOnly: Boolean, + sequencerConnectionValidation: SequencerConnectionValidation, + ) extends Base[RegisterDomainRequest, RegisterDomainResponse, Unit] { override def createRequest(): Either[String, RegisterDomainRequest] = Right( RegisterDomainRequest( add = Some(config.toProtoV30), handshakeOnly = handshakeOnly, + sequencerConnectionValidation = sequencerConnectionValidation.toProtoV30, ) ) @@ -761,11 +766,18 @@ object ParticipantAdminCommands { } - final case class ModifyDomainConnection(config: CDomainConnectionConfig) - extends Base[ModifyDomainRequest, ModifyDomainResponse, Unit] { + final case class ModifyDomainConnection( + config: CDomainConnectionConfig, + sequencerConnectionValidation: SequencerConnectionValidation, + ) extends Base[ModifyDomainRequest, ModifyDomainResponse, Unit] { override def createRequest(): Either[String, ModifyDomainRequest] = - Right(ModifyDomainRequest(modify = Some(config.toProtoV30))) + Right( + ModifyDomainRequest( + modify = Some(config.toProtoV30), + sequencerConnectionValidation = sequencerConnectionValidation.toProtoV30, + ) + ) override def submitRequest( service: DomainConnectivityServiceStub, diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala index d00df62cd..8c8d3cb0c 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/commands/SequencerAdminCommands.scala @@ -7,9 +7,9 @@ import cats.syntax.either.* import cats.syntax.traverse.* import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin import com.digitalasset.canton.domain.sequencing.sequencer.SequencerPruningStatus import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerTrafficStatus +import com.digitalasset.canton.sequencer.admin import com.digitalasset.canton.topology.Member import io.grpc.ManagedChannel diff --git 
a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala index fab417b93..c473fdf5d 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/DomainParameters.scala @@ -29,7 +29,7 @@ import com.digitalasset.canton.time.{ } import com.digitalasset.canton.util.BinaryFileUtil import com.digitalasset.canton.version.{ProtoVersion, ProtocolVersion} -import com.digitalasset.canton.crypto as DomainCrypto +import com.digitalasset.canton.{config, crypto as DomainCrypto} import com.google.common.annotations.VisibleForTesting import io.scalaland.chimney.dsl.* @@ -42,7 +42,6 @@ final case class StaticDomainParameters( requiredHashAlgorithms: Set[HashAlgorithm], requiredCryptoKeyFormats: Set[CryptoKeyFormat], protocolVersion: ProtocolVersion, - acsCommitmentsCatchUp: Option[AcsCommitmentsCatchUpConfig], ) { def writeToFile(outputFile: String): Unit = BinaryFileUtil.writeByteStringToFile(outputFile, toInternal.toByteString) @@ -65,7 +64,6 @@ final case class StaticDomainParameters( requiredCryptoKeyFormats.map(_.transformInto[DomainCrypto.CryptoKeyFormat]) ), protocolVersion = protocolVersion, - acsCommitmentsCatchUp = acsCommitmentsCatchUp, ) } @@ -116,7 +114,6 @@ object StaticDomainParameters { requiredCryptoKeyFormats = domain.requiredCryptoKeyFormats.forgetNE.map(_.transformInto[CryptoKeyFormat]), protocolVersion = domain.protocolVersion, - acsCommitmentsCatchUp = domain.acsCommitmentsCatchUp, ) def tryReadFromFile(inputFile: String): StaticDomainParameters = { @@ -146,8 +143,12 @@ final case class DynamicDomainParameters( sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, trafficControlParameters: Option[TrafficControlParameters], onboardingRestriction: OnboardingRestriction, + acsCommitmentsCatchUpConfig: Option[AcsCommitmentsCatchUpConfig], ) { + def decisionTimeout: config.NonNegativeFiniteDuration = + confirmationResponseTimeout + mediatorReactionTimeout + if (ledgerTimeRecordTimeTolerance * 2 > mediatorDeduplicationTimeout) throw new InvalidDynamicDomainParameters( s"The ledgerTimeRecordTimeTolerance ($ledgerTimeRecordTimeTolerance) must be at most half of the " + @@ -179,6 +180,7 @@ final case class DynamicDomainParameters( sequencerAggregateSubmissionTimeout, trafficControlParameters: Option[TrafficControlParameters] = trafficControlParameters, onboardingRestriction: OnboardingRestriction = onboardingRestriction, + acsCommitmentsCatchUpConfig: Option[AcsCommitmentsCatchUpConfig] = acsCommitmentsCatchUpConfig, ): DynamicDomainParameters = this.copy( confirmationResponseTimeout = confirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, @@ -192,6 +194,7 @@ final case class DynamicDomainParameters( sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, trafficControlParameters = trafficControlParameters, onboardingRestriction = onboardingRestriction, + acsCommitmentsCatchUpConfig = acsCommitmentsCatchUpConfig, ) private[canton] def toInternal: Either[String, DynamicDomainParametersInternal] = @@ -218,6 +221,7 @@ final case class DynamicDomainParameters( InternalNonNegativeFiniteDuration.fromConfig(sequencerAggregateSubmissionTimeout), trafficControlParameters = trafficControlParameters.map(_.toInternal), onboardingRestriction = onboardingRestriction, 
+ acsCommitmentsCatchUpConfigParameter = acsCommitmentsCatchUpConfig, )(rpv) } } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TrafficControlParameters.scala b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TrafficControlParameters.scala index ca88792eb..f1d482499 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TrafficControlParameters.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/admin/api/client/data/TrafficControlParameters.scala @@ -15,6 +15,7 @@ final case class TrafficControlParameters( maxBaseTrafficAmount: NonNegativeLong, readVsWriteScalingFactor: PositiveInt, maxBaseTrafficAccumulationDuration: config.NonNegativeFiniteDuration, + enforceRateLimiting: Boolean, ) { private[canton] def toInternal: TrafficControlParametersInternal = @@ -23,5 +24,6 @@ final case class TrafficControlParameters( readVsWriteScalingFactor = readVsWriteScalingFactor, maxBaseTrafficAccumulationDuration = InternalNonNegativeFiniteDuration.fromConfig(maxBaseTrafficAccumulationDuration), + enforceRateLimiting = enforceRateLimiting, ) } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala b/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala index b56d4da0c..53df291c0 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/config/CantonConfig.scala @@ -582,9 +582,6 @@ object CantonConfig { implicit val tracingConfigDisabledSpanExporterReader : ConfigReader[TracingConfig.Exporter.Disabled.type] = deriveReader[TracingConfig.Exporter.Disabled.type] - implicit val tracingConfigJaegerSpanExporterReader - : ConfigReader[TracingConfig.Exporter.Jaeger] = - deriveReader[TracingConfig.Exporter.Jaeger] implicit val tracingConfigZipkinSpanExporterReader : ConfigReader[TracingConfig.Exporter.Zipkin] = deriveReader[TracingConfig.Exporter.Zipkin] @@ -889,6 +886,10 @@ object CantonConfig { deriveReader[MetricsReporterConfig.Prometheus] lazy implicit val metricsConfigCsvReader: ConfigReader[MetricsReporterConfig.Csv] = deriveReader[MetricsReporterConfig.Csv] + lazy implicit val metricsConfigLoggingReader: ConfigReader[MetricsReporterConfig.Logging] = + deriveReader[MetricsReporterConfig.Logging] + lazy implicit val metricsConfigJvmConfigReader: ConfigReader[MetricsConfig.JvmMetrics] = + deriveReader[MetricsConfig.JvmMetrics] lazy implicit val metricsReporterConfigReader: ConfigReader[MetricsReporterConfig] = deriveReader[MetricsReporterConfig] lazy implicit val histogramDefinitionConfigReader: ConfigReader[HistogramDefinition] = @@ -1001,9 +1002,6 @@ object CantonConfig { implicit val tracingConfigDisabledSpanExporterWriter : ConfigWriter[TracingConfig.Exporter.Disabled.type] = deriveWriter[TracingConfig.Exporter.Disabled.type] - implicit val tracingConfigJaegerSpanExporterWriter - : ConfigWriter[TracingConfig.Exporter.Jaeger] = - deriveWriter[TracingConfig.Exporter.Jaeger] implicit val tracingConfigZipkinSpanExporterWriter : ConfigWriter[TracingConfig.Exporter.Zipkin] = deriveWriter[TracingConfig.Exporter.Zipkin] @@ -1276,7 +1274,10 @@ object CantonConfig { deriveWriter[MetricsReporterConfig.Prometheus] lazy implicit val metricsConfigCsvWriter: ConfigWriter[MetricsReporterConfig.Csv] = deriveWriter[MetricsReporterConfig.Csv] - + lazy implicit val metricsConfigLoggingWriter: 
ConfigWriter[MetricsReporterConfig.Logging] = + deriveWriter[MetricsReporterConfig.Logging] + lazy implicit val metricsConfigJvmMetricsWriter: ConfigWriter[MetricsConfig.JvmMetrics] = + deriveWriter[MetricsConfig.JvmMetrics] lazy implicit val metricsReporterConfigWriter: ConfigWriter[MetricsReporterConfig] = deriveWriter[MetricsReporterConfig] lazy implicit val histogramDefinitionConfigWriter: ConfigWriter[HistogramDefinition] = diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala index 24449c800..f04c18375 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleEnvironmentBinding.scala @@ -31,6 +31,7 @@ class ConsoleEnvironmentBinding { |import com.digitalasset.canton.SequencerAlias |import com.digitalasset.canton.sequencing.SequencerConnection |import com.digitalasset.canton.sequencing.SequencerConnections + |import com.digitalasset.canton.sequencing.SequencerConnectionValidation._ |import com.digitalasset.canton.sequencing.GrpcSequencerConnection |$consoleMacrosImport |import com.digitalasset.canton.console.commands.DomainChoice diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala index a1a4a67bb..32a6e3786 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/ConsoleMacros.scala @@ -46,9 +46,14 @@ import com.digitalasset.canton.participant.admin.repair.RepairService import com.digitalasset.canton.participant.config.BaseParticipantConfig import com.digitalasset.canton.protocol.SerializableContract.LedgerCreateTime import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections} import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.TopologyStoreId.AuthorizedStore +import com.digitalasset.canton.topology.store.{ + StoredTopologyTransactionX, + StoredTopologyTransactionsX, +} import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.{ GenericSignedTopologyTransactionX, PositiveSignedTopologyTransactionX, @@ -849,7 +854,7 @@ trait ConsoleMacros extends NamedLogging with NoTracing { .toRight("you need at least one sequencer") neMediators <- NonEmpty.from(mediators.distinct).toRight("you need at least one mediator") nodes = neOwners ++ neSequencers ++ neMediators - _ = EitherUtil.condUnitE(nodes.forall(_.health.running()), "all nodes must be running") + _ = EitherUtil.condUnitE(nodes.forall(_.health.is_running()), "all nodes must be running") ns <- expected_namespace(neOwners) expectedId = ns.map(ns => DomainId(UniqueIdentifier.tryCreate(name, ns.toProtoPrimitive))) actualIdIfAllNodesAreInitialized <- expectedId.fold( @@ -910,9 +915,22 @@ trait ConsoleMacros extends NamedLogging with NoTracing { .toSeq .sortBy(tx => orderingMap(tx.mapping.code)) + val storedTopologySnapshot = StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]( + merged.map(stored => + 
StoredTopologyTransactionX( + SequencedTime(SignedTopologyTransactionX.InitialTopologySequencingTime), + EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime), + None, + stored, + ) + ) + ).toByteString(staticDomainParameters.protocolVersion) + sequencers .filterNot(_.health.initialized()) - .foreach(x => x.setup.assign_from_beginning(merged, staticDomainParameters).discard) + .foreach(x => + x.setup.assign_from_genesis_state(storedTopologySnapshot, staticDomainParameters).discard + ) mediators .filter(!_.health.initialized()) @@ -926,6 +944,10 @@ trait ConsoleMacros extends NamedLogging with NoTracing { PositiveInt.one, PositiveInt.one, ), + // if we run bootstrap ourselves, we should have been able to reach the nodes + // so we don't want the bootstrapping to fail spuriously here in the middle of + // the setup + SequencerConnectionValidation.Disabled, ) ) diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala index b7f90bc86..ea596f9c4 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/InstanceReference.scala @@ -19,12 +19,10 @@ import com.digitalasset.canton.console.CommandErrors.NodeNotStarted import com.digitalasset.canton.console.commands.* import com.digitalasset.canton.crypto.Crypto import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30.SequencerPruningAdministrationServiceGrpc -import com.digitalasset.canton.domain.admin.v30.SequencerPruningAdministrationServiceGrpc.SequencerPruningAdministrationServiceStub import com.digitalasset.canton.domain.mediator.{ + MediatorNode, MediatorNodeBootstrapX, MediatorNodeConfigCommon, - MediatorNodeX, RemoteMediatorConfig, } import com.digitalasset.canton.domain.sequencing.config.{ @@ -51,6 +49,8 @@ import com.digitalasset.canton.participant.{ ParticipantNodeCommon, ParticipantNodeX, } +import com.digitalasset.canton.sequencer.admin.v30.SequencerPruningAdministrationServiceGrpc +import com.digitalasset.canton.sequencer.admin.v30.SequencerPruningAdministrationServiceGrpc.SequencerPruningAdministrationServiceStub import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnections} import com.digitalasset.canton.time.EnrichedDurations.* import com.digitalasset.canton.topology.* @@ -544,7 +544,7 @@ abstract class ParticipantReference( val connected = domains.list_connected().map(_.domainId).toSet // for every participant consoleEnvironment.participants.all - .filter(p => p.health.running() && p.health.initialized()) + .filter(p => p.health.is_running() && p.health.initialized()) .foreach { participant => // for every domain this participant is connected to as well participant.domains.list_connected().foreach { @@ -1280,7 +1280,7 @@ class LocalMediatorReference(consoleEnvironment: ConsoleEnvironment, val name: S extends MediatorReference(consoleEnvironment, name) with LocalInstanceReference with SequencerConnectionAdministration - with BaseInspection[MediatorNodeX] { + with BaseInspection[MediatorNode] { override protected[canton] def executionContext: ExecutionContext = consoleEnvironment.environment.executionContext diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala 
b/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala index cb9cb7905..81b4a22fa 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/ParticipantReferencesExtensions.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.config.NonNegativeDuration import com.digitalasset.canton.console.commands.ParticipantCommands import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.domain.DomainConnectionConfig +import com.digitalasset.canton.sequencing.SequencerConnectionValidation import com.digitalasset.canton.{DomainAlias, SequencerAlias} class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(implicit @@ -97,10 +98,14 @@ class ParticipantReferencesExtensions(participants: Seq[ParticipantReference])(i .discard @Help.Summary("Register and potentially connect to domain") - def register(config: DomainConnectionConfig, handshakeOnly: Boolean = false): Unit = + def register( + config: DomainConnectionConfig, + handshakeOnly: Boolean = false, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, + ): Unit = ConsoleCommandResult .runAll(participants)( - ParticipantCommands.domains.register(_, config, handshakeOnly = handshakeOnly) + ParticipantCommands.domains.register(_, config, handshakeOnly = handshakeOnly, validation) ) .discard diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala index f45749b76..5e4266fea 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/HealthAdministration.scala @@ -107,7 +107,7 @@ abstract class HealthAdministrationCommon[S <: data.NodeStatus.Status]( })) @Help.Summary("Check if the node is running") - def running(): Boolean = + def is_running(): Boolean = // in case the node is not reachable, we assume it is not running falseIfUnreachable(runningCommand) @@ -128,7 +128,7 @@ abstract class HealthAdministrationCommon[S <: data.NodeStatus.Status]( } @Help.Summary("Wait for the node to be running") - def wait_for_running(): Unit = waitFor(running()) + def wait_for_running(): Unit = waitFor(is_running()) @Help.Summary("Wait for the node to be initialized") def wait_for_initialized(): Unit = { diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala index 9c5872ed6..3f43461aa 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/LedgerApiAdministration.scala @@ -2289,7 +2289,7 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration { // way to get the record time of the transaction to pass to the parties.list call. 
val domainPartiesAndParticipants = { consoleEnvironment.participants.all.iterator - .filter(x => x.health.running() && x.health.initialized() && x.name == name) + .filter(x => x.health.is_running() && x.health.initialized() && x.name == name) .flatMap(_.parties.list(filterDomain = txDomain.filterString)) .toSet } @@ -2330,7 +2330,7 @@ trait LedgerApiAdministration extends BaseLedgerApiAdministration { for { participantReference <- consoleEnvironment.participants.all - .filter(x => x.health.running() && x.health.initialized()) + .filter(x => x.health.is_running() && x.health.initialized()) .find(identityIs(_, pd.participant)) _ <- pd.domains.find(_.domain == txDomain) } yield participantReference diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala index fab52ced6..f527a175b 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/MediatorAdministrationGroup.scala @@ -24,9 +24,9 @@ import com.digitalasset.canton.console.{ Helpful, } import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30 import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.mediator.admin.v30 +import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections} import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.DomainId @@ -135,12 +135,15 @@ class MediatorXSetupGroup(consoleCommandGroup: ConsoleCommandGroup) domainId: DomainId, domainParameters: StaticDomainParameters, sequencerConnections: SequencerConnections, + sequencerConnectionValidation: SequencerConnectionValidation = + SequencerConnectionValidation.All, ): Unit = consoleEnvironment.run { runner.adminCommand( InitializeX( domainId, domainParameters.toInternal, sequencerConnections, + sequencerConnectionValidation, ) ) } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala index 0dd37f326..5c3e7979e 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantAdministration.scala @@ -61,6 +61,7 @@ import com.digitalasset.canton.protocol.{LfCommittedTransaction, SerializableCon import com.digitalasset.canton.sequencing.{ PossiblyIgnoredProtocolEvent, SequencerConnection, + SequencerConnectionValidation, SequencerConnections, } import com.digitalasset.canton.serialization.ProtoConverter @@ -164,10 +165,11 @@ private[console] object ParticipantCommands { runner: AdminCommandRunner, config: DomainConnectionConfig, handshakeOnly: Boolean, + validation: SequencerConnectionValidation, ) = runner.adminCommand( ParticipantAdminCommands.DomainConnectivity - .RegisterDomain(config, handshakeOnly = handshakeOnly) + .RegisterDomain(config, handshakeOnly = handshakeOnly, validation) ) def reconnect(runner: AdminCommandRunner, domainAlias: DomainAlias, retry: Boolean) = { @@ -1052,6 +1054,7 @@ trait 
ParticipantAdministration extends FeatureFlagFilter { synchronize: Option[NonNegativeDuration] = Some( consoleEnvironment.commandTimeouts.bounded ), + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = { val config = ParticipantCommands.domains.reference_to_config( NonEmpty.mk(Seq, SequencerAlias.Default -> domain).toMap, @@ -1060,7 +1063,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { maxRetryDelayMillis.map(NonNegativeFiniteDuration.tryOfMillis), priority, ) - connectFromConfig(config, synchronize) + connect_by_config(config, validation, synchronize) } @Help.Summary( @@ -1074,6 +1077,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { maxRetryDelayMillis - Maximal amount of time (in milliseconds) between two connection attempts. priority - The priority of the domain. The higher the more likely a domain will be used. synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + validation - Whether to validate the connectivity and ids of the given sequencers (default All) """) def register( domain: SequencerNodeReference, @@ -1084,6 +1088,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { synchronize: Option[NonNegativeDuration] = Some( consoleEnvironment.commandTimeouts.bounded ), + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = { val config = ParticipantCommands.domains.reference_to_config( NonEmpty.mk(Seq, SequencerAlias.Default -> domain).toMap, @@ -1092,7 +1097,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { maxRetryDelayMillis.map(NonNegativeFiniteDuration.tryOfMillis), priority, ) - register_with_config(config, handshakeOnly = handshakeOnly, synchronize) + register_with_config(config, handshakeOnly = handshakeOnly, validation, synchronize) } @Help.Summary( @@ -1102,11 +1107,13 @@ trait ParticipantAdministration extends FeatureFlagFilter { The arguments are: config - Config for the domain connection handshake only - If yes, only the handshake will be perfomed (no domain connection) + validation - Whether to validate the connectivity and ids of the given sequencers (default All) synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. 
""") def register_with_config( config: DomainConnectionConfig, handshakeOnly: Boolean, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, synchronize: Option[NonNegativeDuration] = Some( consoleEnvironment.commandTimeouts.bounded ), @@ -1116,7 +1123,12 @@ trait ParticipantAdministration extends FeatureFlagFilter { if (current.isEmpty) { // register the domain configuration consoleEnvironment.run { - ParticipantCommands.domains.register(runner, config, handshakeOnly = handshakeOnly) + ParticipantCommands.domains.register( + runner, + config, + handshakeOnly = handshakeOnly, + validation, + ) } } synchronize.foreach { timeout => @@ -1135,6 +1147,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { ), sequencerTrustThreshold: PositiveInt = PositiveInt.one, submissionRequestAmplification: PositiveInt = PositiveInt.one, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = { val config = ParticipantCommands.domains.reference_to_config( domain, @@ -1145,7 +1158,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { sequencerTrustThreshold, submissionRequestAmplification, ) - connectFromConfig(config, synchronize) + connect_by_config(config, validation, synchronize) } @Help.Summary("Macro to connect a participant to a domain given by connection") @@ -1153,46 +1166,25 @@ trait ParticipantAdministration extends FeatureFlagFilter { |Otherwise the behaviour is equivalent to the connect command with explicit |arguments. If the domain is already configured, the domain connection |will be attempted. If however the domain is offline, the command will fail. - |Generally, this macro should only be used to setup a new domain. However, for + |Generally, this macro should only be used for the first connection to a new domain. However, for |convenience, we support idempotent invocations where subsequent calls just ensure |that the participant reconnects to the domain. - |""") - def connect( - config: DomainConnectionConfig - ): Unit = { - connectFromConfig(config, None) - } - @Help.Summary("Macro to connect a participant to a domain given by instance") - @Help.Description("""This variant of connect expects an instance with a sequencer connection. - |Otherwise the behaviour is equivalent to the connect command with explicit - |arguments. If the domain is already configured, the domain connection - |will be attempted. If however the domain is offline, the command will fail. - |Generally, this macro should only be used to setup a new domain. However, for - |convenience, we support idempotent invocations where subsequent calls just ensure - |that the participant reconnects to the domain. 
+ validation - Whether to validate the connectivity and ids of the given sequencers (default all) |""") - def connect( - instance: SequencerNodeReference, - domainAlias: DomainAlias, - ): Unit = - connect( - DomainConnectionConfig( - domainAlias, - SequencerConnections.single(instance.sequencerConnection), - ) - ) - - private def connectFromConfig( + def connect_by_config( config: DomainConnectionConfig, - synchronize: Option[NonNegativeDuration], + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, + synchronize: Option[NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.unbounded + ), ): Unit = { val current = this.config(config.domain) if (current.isEmpty) { // architecture-handbook-entry-begin: OnboardParticipantConnect // register the domain configuration consoleEnvironment.run { - ParticipantCommands.domains.register(runner, config, handshakeOnly = false) + ParticipantCommands.domains.register(runner, config, handshakeOnly = false, validation) } if (!config.manualConnect) { reconnect(config.domain.unwrap, retry = false).discard @@ -1209,6 +1201,26 @@ trait ParticipantAdministration extends FeatureFlagFilter { } } + @Help.Summary("Macro to connect a participant to a domain given by instance") + @Help.Description("""This variant of connect expects an instance with a sequencer connection. + |Otherwise the behaviour is equivalent to the connect command with explicit + |arguments. If the domain is already configured, the domain connection + |will be attempted. If however the domain is offline, the command will fail. + |Generally, this macro should only be used for the first connection to a new domain. However, for + |convenience, we support idempotent invocations where subsequent calls just ensure + |that the participant reconnects to the domain. + |""") + def connect( + instance: SequencerNodeReference, + domainAlias: DomainAlias, + ): Unit = + connect_by_config( + DomainConnectionConfig( + domainAlias, + SequencerConnections.single(instance.sequencerConnection), + ) + ) + @Help.Summary("Macro to connect a participant to a domain given by connection") @Help.Description("""The connect macro performs a series of commands in order to connect this participant to a domain. |First, `register` will be invoked with the given arguments, but first registered @@ -1227,6 +1239,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { priority - The priority of the domain. The higher the more likely a domain will be used. timeTrackerConfig - The configuration for the domain time tracker. synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. 
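A corresponding sketch for the renamed connect macro, again with hypothetical `participant1` and `daConfig`; `connect_by_config` replaces the former `connect(config)` overload and remains idempotent on repeated calls:

```scala
// Registers the domain on first use and then connects; subsequent calls just reconnect.
// validation and synchronize keep their defaults (All, unbounded timeout) unless overridden.
participant1.domains.connect_by_config(daConfig)
```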
+ validation - Whether to validate the connectivity and ids of the given sequencers (default All) """) def connect( domainAlias: DomainAlias, @@ -1239,6 +1252,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { synchronize: Option[NonNegativeDuration] = Some( consoleEnvironment.commandTimeouts.bounded ), + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): DomainConnectionConfig = { val config = ParticipantCommands.domains.to_config( domainAlias, @@ -1249,7 +1263,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { priority, timeTrackerConfig = timeTrackerConfig, ) - connectFromConfig(config, synchronize) + connect_by_config(config, validation, synchronize) config } @@ -1272,6 +1286,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { domainAlias - The name you will be using to refer to this domain. Can not be changed anymore. connections - The sequencer connection definitions (can be an URL) to connect to this domain. I.e. https://url:port synchronize - A timeout duration indicating how long to wait for all topology changes to have been effected on all local nodes. + validation - Whether to validate the connectivity and ids of the given sequencers (default All) """) def connect_multi( domainAlias: DomainAlias, @@ -1279,6 +1294,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { synchronize: Option[NonNegativeDuration] = Some( consoleEnvironment.commandTimeouts.bounded ), + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): DomainConnectionConfig = { val sequencerConnection = SequencerConnection.merge(connections).getOrElse(sys.error("Invalid sequencer connection")) @@ -1288,7 +1304,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { domainAlias, sequencerConnections, ) - connectFromConfig(config, synchronize) + connect_by_config(config, validation, synchronize) config } @@ -1412,6 +1428,7 @@ trait ParticipantAdministration extends FeatureFlagFilter { def modify( domain: DomainAlias, modifier: DomainConnectionConfig => DomainConnectionConfig, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = { consoleEnvironment.runE { for { @@ -1427,7 +1444,10 @@ trait ParticipantAdministration extends FeatureFlagFilter { if (newConfig.domain == cfg.domain) Right(()) else Left("We don't support modifying the domain alias of a DomainConnectionConfig.") _ <- adminCommand( - ParticipantAdminCommands.DomainConnectivity.ModifyDomainConnection(modifier(cfg)) + ParticipantAdminCommands.DomainConnectivity.ModifyDomainConnection( + modifier(cfg), + validation, + ) ).toEither } yield () } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala index 2bc2ce2dc..1b3262979 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/ParticipantRepairAdministration.scala @@ -213,14 +213,13 @@ class ParticipantRepairAdministration( |fail under the following circumstances: | - the contract salt used to compute the contract ID is missing | - the contract ID discriminator version is unknown - | - an imported contract references the ID of a contract which is missing from the import | |Note that only the Canton-specific contract ID 
suffix will be recomputed. The discriminator cannot be |recomputed and will be left as is. | -|The last requirement means that the import process will fail if you try to import a contract without the -|contract ID it references in its payload being present in the import (this is because the contract ID -|requires the payload of the contract to exist in order compute the contract ID for it). + |The recomputation will not be performed on contract IDs that are referenced in the payload of an imported contract + |but are missing from the import itself (this should mean that the contract was archived, which makes + |recomputation unnecessary). | |If the import process succeeds, the mapping from the old contract IDs to the new contract IDs will be returned. |An empty map means that all contract IDs were valid and no contract ID was recomputed. diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerConnectionAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerConnectionAdministration.scala index 00a0d3f3c..708b2e1c7 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerConnectionAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerConnectionAdministration.scala @@ -7,7 +7,11 @@ import cats.syntax.either.* import com.digitalasset.canton.SequencerAlias import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerConnectionAdminCommands import com.digitalasset.canton.console.{AdminCommandRunner, Help, Helpful, InstanceReference} -import com.digitalasset.canton.sequencing.{SequencerConnection, SequencerConnections} +import com.digitalasset.canton.sequencing.{ + SequencerConnection, + SequencerConnectionValidation, + SequencerConnections, +} import scala.util.Try @@ -34,9 +38,12 @@ trait SequencerConnectionAdministration extends Helpful { "This will replace any pre-configured connection details. " + "This command will only work after the node has been initialized." ) - def set(connections: SequencerConnections): Unit = consoleEnvironment.run { + def set( + connections: SequencerConnections, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, + ): Unit = consoleEnvironment.run { adminCommand( - EnterpriseSequencerConnectionAdminCommands.SetConnection(connections) + EnterpriseSequencerConnectionAdminCommands.SetConnection(connections, validation) ) } @@ -46,10 +53,14 @@ trait SequencerConnectionAdministration extends Helpful { "This will replace any pre-configured connection details. " + "This command will only work after the node has been initialized." ) - def set(connection: SequencerConnection): Unit = consoleEnvironment.run { + def set_single( + connection: SequencerConnection, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, + ): Unit = consoleEnvironment.run { adminCommand( EnterpriseSequencerConnectionAdminCommands.SetConnection( - SequencerConnections.single(connection) + SequencerConnections.single(connection), + validation, ) ) } @@ -68,7 +79,8 @@ trait SequencerConnectionAdministration extends Helpful { "by passing a modifier function that operates on the existing connection configuration.
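As a usage sketch for the split `set`/`set_single` commands (the node name `mediator1`, the URL, and the `GrpcSequencerConnection.tryCreate` helper are assumptions, not taken from this patch):

```scala
import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnectionValidation}

// Replace the node's sequencer connection with a single endpoint and validate it before applying.
val conn = GrpcSequencerConnection.tryCreate("https://sequencer.example.com:443")
mediator1.sequencer_connection.set_single(conn, SequencerConnectionValidation.All)
```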
" ) def modify_connections( - modifier: SequencerConnections => SequencerConnections + modifier: SequencerConnections => SequencerConnections, + validation: SequencerConnectionValidation = SequencerConnectionValidation.All, ): Unit = consoleEnvironment.runE { for { @@ -78,7 +90,7 @@ trait SequencerConnectionAdministration extends Helpful { conn <- connOption.toRight("Node not yet initialized") newConn <- Try(modifier(conn)).toEither.leftMap(_.getMessage) _ <- adminCommand( - EnterpriseSequencerConnectionAdminCommands.SetConnection(newConn) + EnterpriseSequencerConnectionAdminCommands.SetConnection(newConn, validation) ).toEither } yield () diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerNodeAdministration.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerNodeAdministration.scala index 58d8ec011..fd5e0fd70 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerNodeAdministration.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/SequencerNodeAdministration.scala @@ -3,29 +3,17 @@ package com.digitalasset.canton.console.commands -import cats.syntax.option.* import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerAdminCommands import com.digitalasset.canton.admin.api.client.commands.EnterpriseSequencerAdminCommands.{ - Initialize, - InitializeX, + InitializeFromGenesisState, + InitializeFromOnboardingState, } import com.digitalasset.canton.admin.api.client.data.StaticDomainParameters import com.digitalasset.canton.console.Help import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.sequencing.admin.grpc.InitializeSequencerResponseX +import com.digitalasset.canton.domain.sequencing.admin.grpc.InitializeSequencerResponse import com.digitalasset.canton.domain.sequencing.sequencer.SequencerSnapshot -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} -import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX -import com.digitalasset.canton.topology.store.{ - StoredTopologyTransactionX, - StoredTopologyTransactionsX, -} -import com.digitalasset.canton.topology.transaction.SignedTopologyTransactionX.GenericSignedTopologyTransactionX -import com.digitalasset.canton.topology.transaction.{ - SignedTopologyTransactionX, - TopologyChangeOpX, - TopologyMappingX, -} +import com.digitalasset.canton.topology.SequencerId import com.google.protobuf.ByteString class SequencerXSetupGroup(parent: ConsoleCommandGroup) extends ConsoleCommandGroup.Impl(parent) { @@ -42,43 +30,57 @@ class SequencerXSetupGroup(parent: ConsoleCommandGroup) extends ConsoleCommandGr } @Help.Summary( - "Initialize a sequencer from the beginning of the event stream. This should only be called for " + - "sequencer nodes being initialized at the same time as the corresponding domain node. " + - "This is called as part of the domain.setup.bootstrap command, so you are unlikely to need to call this directly." 
+ "Download the onboarding state at a given point in time to bootstrap another sequencer" ) - def assign_from_beginning( - genesisState: Seq[GenericSignedTopologyTransactionX], - domainParameters: StaticDomainParameters, - ): InitializeSequencerResponseX = + def onboarding_state_at_timestamp( + timestamp: CantonTimestamp + ): ByteString = { + consoleEnvironment.run { + runner.adminCommand(EnterpriseSequencerAdminCommands.OnboardingState(Right(timestamp))) + } + } + + @Help.Summary( + "Download the genesis state for a sequencer. We exclude the VettedPackages from this initial state. " + + "This method should be used when performing major upgrades." + ) + def genesis_state_for_sequencer( + timestamp: Option[CantonTimestamp] = None + ): ByteString = { consoleEnvironment.run { runner.adminCommand( - InitializeX( - StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]( - genesisState.map(signed => - StoredTopologyTransactionX( - SequencedTime(SignedTopologyTransactionX.InitialTopologySequencingTime), - EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime), - None, - signed, - ) - ) - ), - domainParameters.toInternal, - None, + EnterpriseSequencerAdminCommands.GenesisState( + timestamp = timestamp ) ) } + } + + @Help.Summary( + "Download the onboarding state for a given sequencer" + ) + def onboarding_state_for_sequencer( + sequencerId: SequencerId + ): ByteString = { + consoleEnvironment.run { + runner.adminCommand(EnterpriseSequencerAdminCommands.OnboardingState(Left(sequencerId))) + } + } - def assign_from_beginning( + @Help.Summary( + "Initialize a sequencer from the beginning of the event stream. This should only be called for " + + "sequencer nodes being initialized at the same time as the corresponding domain node. " + + "This is called as part of the domain.setup.bootstrap command, so you are unlikely to need to call this directly." + ) + def assign_from_genesis_state( genesisState: ByteString, domainParameters: StaticDomainParameters, - ): InitializeSequencerResponseX = + ): InitializeSequencerResponse = consoleEnvironment.run { runner.adminCommand( - Initialize( + InitializeFromGenesisState( genesisState, domainParameters.toInternal, - ByteString.empty(), ) ) } @@ -87,14 +89,10 @@ class SequencerXSetupGroup(parent: ConsoleCommandGroup) extends ConsoleCommandGr "Dynamically initialize a sequencer from a point later than the beginning of the event stream." + "This is called as part of the sequencer.setup.onboard_new_sequencer command, so you are unlikely to need to call this directly." 
) - def assign_from_snapshot( - topologySnapshot: GenericStoredTopologyTransactionsX, - sequencerSnapshot: SequencerSnapshot, - domainParameters: StaticDomainParameters, - ): InitializeSequencerResponseX = + def assign_from_onboarding_state(onboardingState: ByteString): InitializeSequencerResponse = consoleEnvironment.run { runner.adminCommand( - InitializeX(topologySnapshot, domainParameters.toInternal, sequencerSnapshot.some) + InitializeFromOnboardingState(onboardingState) ) } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala index 6b2b3dc2c..8a9961b66 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/console/commands/TopologyAdministrationX.scala @@ -10,8 +10,7 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.admin.api.client.commands.TopologyAdminCommandsX import com.digitalasset.canton.admin.api.client.data.topologyx.* import com.digitalasset.canton.admin.api.client.data.{ - DynamicDomainParameters as ConsoleDynamicDomainParameters, - TrafficControlParameters, + DynamicDomainParameters as ConsoleDynamicDomainParameters } import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt, PositiveLong} import com.digitalasset.canton.config.{NonNegativeDuration, RequireTypes} @@ -33,7 +32,6 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.error.CantonError import com.digitalasset.canton.health.admin.data.TopologyQueueStatus import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.protocol.OnboardingRestriction import com.digitalasset.canton.time.EnrichedDurations.* import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.admin.grpc.{BaseQueryX, TopologyStore} @@ -133,11 +131,9 @@ class TopologyAdministrationGroup( ) def identity_transactions() : Seq[SignedTopologyTransactionX[TopologyChangeOpX, TopologyMappingX]] = { - val excludeExceptTopologyMappings = - TopologyMappingX.Code.all.diff(Seq(NamespaceDelegationX.code, OwnerToKeyMappingX.code)) instance.topology.transactions .list( - excludeMappings = excludeExceptTopologyMappings, + filterMappings = Seq(NamespaceDelegationX.code, OwnerToKeyMappingX.code), filterNamespace = instance.id.uid.namespace.filterString, ) .result @@ -149,11 +145,9 @@ class TopologyAdministrationGroup( "Transactions serialized this way should be loaded into another node with load_from_file" ) def export_identity_transactions(file: String): Unit = { - val excludeExceptTopologyMappings = - TopologyMappingX.Code.all.diff(Seq(NamespaceDelegationX.code, OwnerToKeyMappingX.code)) val bytes = instance.topology.transactions .export_topology_snapshot( - excludeMappings = excludeExceptTopologyMappings, + filterMappings = Seq(NamespaceDelegationX.code, OwnerToKeyMappingX.code), filterNamespace = instance.id.uid.namespace.filterString, ) writeToFile(file, bytes) @@ -212,11 +206,22 @@ class TopologyAdministrationGroup( proposals: Boolean = false, timeQuery: TimeQuery = TimeQuery.HeadState, operation: Option[TopologyChangeOpX] = None, + filterMappings: Seq[TopologyMappingX.Code] = Nil, excludeMappings: Seq[TopologyMappingX.Code] = Nil, filterAuthorizedKey: Option[Fingerprint] = None, protocolVersion: Option[String] = None, filterNamespace: String = 
"", ): StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX] = { + if (filterMappings.nonEmpty && excludeMappings.nonEmpty) { + consoleEnvironment.run( + CommandErrors + .GenericCommandError("Cannot specify both filterMappings and excludeMappings") + ) + } + val excludeMappingsCodes = if (filterMappings.nonEmpty) { + TopologyMappingX.Code.all.diff(filterMappings).map(_.code) + } else excludeMappings.map(_.code) + consoleEnvironment .run { adminCommand( @@ -229,7 +234,7 @@ class TopologyAdministrationGroup( filterSigningKey = filterAuthorizedKey.map(_.toProtoPrimitive).getOrElse(""), protocolVersion.map(ProtocolVersion.tryCreate), ), - excludeMappings = excludeMappings.map(_.code), + excludeMappings = excludeMappingsCodes, filterNamespace = filterNamespace, ) ) @@ -251,11 +256,22 @@ class TopologyAdministrationGroup( proposals: Boolean = false, timeQuery: TimeQuery = TimeQuery.HeadState, operation: Option[TopologyChangeOpX] = None, + filterMappings: Seq[TopologyMappingX.Code] = Nil, excludeMappings: Seq[TopologyMappingX.Code] = Nil, filterAuthorizedKey: Option[Fingerprint] = None, protocolVersion: Option[String] = None, filterNamespace: String = "", ): ByteString = { + if (filterMappings.nonEmpty && excludeMappings.nonEmpty) { + consoleEnvironment.run( + CommandErrors + .GenericCommandError("Cannot specify both filterMappings and excludeMappings") + ) + } + val excludeMappingsCodes = if (filterMappings.nonEmpty) { + TopologyMappingX.Code.all.diff(filterMappings).map(_.code) + } else excludeMappings.map(_.code) + consoleEnvironment .run { adminCommand( @@ -268,36 +284,12 @@ class TopologyAdministrationGroup( filterSigningKey = filterAuthorizedKey.map(_.toProtoPrimitive).getOrElse(""), protocolVersion.map(ProtocolVersion.tryCreate), ), - excludeMappings = excludeMappings.map(_.code), + excludeMappings = excludeMappingsCodes, filterNamespace = filterNamespace, ) ) } } - @Help.Summary("export topology snapshot to a file") - def export_topology_snapshot_to_file( - filterStore: String = AuthorizedStore.filterName, - proposals: Boolean = false, - outputFile: String = TopologyAdministrationX.exportTransactionsDefaultFile, - timeQuery: TimeQuery = TimeQuery.HeadState, - operation: Option[TopologyChangeOpX] = None, - excludeMappings: Seq[TopologyMappingX.Code] = Nil, - filterAuthorizedKey: Option[Fingerprint] = None, - protocolVersion: Option[String] = None, - filterNamespace: String = "", - ): Unit = { - val bytes = export_topology_snapshot( - filterStore, - proposals, - timeQuery, - operation, - excludeMappings, - filterAuthorizedKey, - protocolVersion, - filterNamespace, - ) - writeToFile(outputFile, bytes) - } @Help.Summary("Find the latest transaction for a given mapping hash") @Help.Description( @@ -1975,14 +1967,116 @@ class TopologyAdministrationGroup( } @Help.Summary("Propose changes to the mediator topology") + @Help.Description( + """ + domainId: the target domain + group: the mediator group identifier + adds: The unique identifiers of the active mediators to add. + removes: The unique identifiers of the mediators that should no longer be active mediators. + observerAdds: The unique identifiers of the observer mediators to add. + observerRemoves: The unique identifiers of the mediators that should no longer be observer mediators. + updateThreshold: Optionally an updated value for the threshold of the mediator group. 
+ await: optional timeout to wait for the proposal to be persisted in the specified topology store + mustFullyAuthorize: when set to true, the proposal's previously received signatures and the signature of this node must be + sufficient to fully authorize the topology transaction. if this is not the case, the request fails. + when set to false, the proposal retains the proposal status until enough signatures are accumulated to + satisfy the mapping's authorization requirements. + signedBy: the fingerprint of the key to be used to sign this proposal""" + ) + def propose_delta( + domainId: DomainId, + group: NonNegativeInt, + adds: List[MediatorId] = Nil, + removes: List[MediatorId] = Nil, + observerAdds: List[MediatorId] = Nil, + observerRemoves: List[MediatorId] = Nil, + updateThreshold: Option[PositiveInt] = None, + await: Option[config.NonNegativeDuration] = Some( + consoleEnvironment.commandTimeouts.bounded + ), + mustFullyAuthorize: Boolean = false, + // TODO(#14056) don't use the instance's root namespace key by default. + // let the grpc service figure out the right key to use, once that's implemented + signedBy: Option[Fingerprint] = Some(instance.id.uid.namespace.fingerprint), + ): Unit = { + + MediatorGroupDeltaComputations + .verifyProposalConsistency(adds, removes, observerAdds, observerRemoves, updateThreshold) + .valueOr(err => throw new IllegalArgumentException(err)) + + def queryStore(proposals: Boolean): Option[MediatorDomainStateX] = expectAtMostOneResult( + list( + domainId.filterString, + group = Some(group), + operation = Some(TopologyChangeOpX.Replace), + proposals = proposals, + ) + ).map(_.item) + + val mdsO = queryStore(proposals = false) + + MediatorGroupDeltaComputations + .verifyProposalAgainstCurrentState( + mdsO, + adds, + removes, + observerAdds, + observerRemoves, + updateThreshold, + ) + .valueOr(err => throw new IllegalArgumentException(err)) + + val (threshold, active, observers) = mdsO match { + case Some(mds) => + ( + mds.threshold, + mds.active.forgetNE.concat(adds).diff(removes), + mds.observers.concat(observerAdds).diff(observerRemoves), + ) + case None => + (PositiveInt.one, adds, observerAdds) + } + + propose( + domainId, + updateThreshold.getOrElse(threshold), + active, + observers, + group, + store = Some(domainId.filterString), + synchronize = None, // no synchronize - instead rely on await below + mustFullyAuthorize = mustFullyAuthorize, + signedBy = signedBy, + ).discard + + await.foreach { timeout => + ConsoleMacros.utils.retry_until_true(timeout) { + def areAllChangesPersisted(mds: MediatorDomainStateX): Boolean = { + adds.forall(mds.active.contains) && removes.forall(!mds.active.contains(_)) && + observerAdds.forall(mds.observers.contains) && observerRemoves.forall( + !mds.observers.contains(_) + ) && updateThreshold.forall(_ == mds.threshold) + } + + if (mustFullyAuthorize) { + queryStore(proposals = false).exists(areAllChangesPersisted) + } else { + // If the proposal does not need to be authorized, first check for proposals then for an authorized transaction + queryStore(proposals = true).exists(areAllChangesPersisted) || queryStore(proposals = + false + ).exists(areAllChangesPersisted) + } + } + } + } + + @Help.Summary("Replace the mediator topology") @Help.Description(""" domainId: the target domain threshold: the minimum number of mediators that need to come to a consensus for a message to be sent to other members. 
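A hedged sketch of `propose_delta` (the console path via `topology.mediators`, the node references, and the helper constructors are assumptions, not values from this patch): adding one active mediator to group 0 and raising the threshold.

```scala
// Adds mediator2 as an active mediator of group 0 and raises the group threshold to 2.
// By default the command waits until the change is visible in the domain's topology state.
sequencer1.topology.mediators.propose_delta(
  domainId = domainId,
  group = NonNegativeInt.tryCreate(0),
  adds = List(mediator2.id),
  updateThreshold = Some(PositiveInt.tryCreate(2)),
)
```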
active: the list of mediators that will take part in the mediator consensus in this mediator group passive: the mediators that will receive all messages but will not participate in mediator consensus group: the mediator group identifier - - store: - "Authorized": the topology transaction will be stored in the node's authorized store and automatically propagated to connected domains, if applicable. - "": the topology transaction will be directly submitted to the specified domain without @@ -2269,7 +2363,7 @@ class TopologyAdministrationGroup( waitForParameters(TopologyAdministrationGroup.this) waitForParticipants .filter(p => - p.health.running() && p.health.initialized() && p.domains.is_connected(domainId) + p.health.is_running() && p.health.initialized() && p.domains.is_connected(domainId) ) .map(_.topology) .foreach(waitForParameters) @@ -2332,38 +2426,6 @@ class TopologyAdministrationGroup( } } - @Help.Summary( - "Update the confirmation response timeout (for participants) in the dynamic domain parameters" - ) - def set_confirmation_response_timeout( - domainId: DomainId, - timeout: config.NonNegativeFiniteDuration, - ): Unit = propose_update(domainId, _.update(confirmationResponseTimeout = timeout)) - - @Help.Summary("Update the mediator reaction timeout in the dynamic domain parameters") - def set_mediator_reaction_timeout( - domainId: DomainId, - timeout: config.NonNegativeFiniteDuration, - ): Unit = propose_update(domainId, _.update(mediatorReactionTimeout = timeout)) - - @Help.Summary("Update the transfer exclusivity timeout in the dynamic domain parameters") - def set_transfer_exclusivity_timeout( - domainId: DomainId, - timeout: config.NonNegativeFiniteDuration, - ): Unit = propose_update(domainId, _.update(transferExclusivityTimeout = timeout)) - - @Help.Summary("Update the topology change delay in the dynamic domain parameters") - def set_topology_change_delay( - domainId: DomainId, - delay: config.NonNegativeFiniteDuration, - ): Unit = propose_update(domainId, _.update(topologyChangeDelay = delay)) - - @Help.Summary("Update the onboarding restrictions") - def set_onboarding_restrictions( - domainId: DomainId, - restriction: OnboardingRestriction, - ): Unit = propose_update(domainId, _.update(onboardingRestriction = restriction)) - @Help.Summary("Update the ledger time record time tolerance in the dynamic domain parameters") @Help.Description( """If it would be insecure to perform the change immediately, @@ -2523,61 +2585,6 @@ class TopologyAdministrationGroup( force = true, ) } - - @Help.Summary("Update the mediator deduplication timeout in the dynamic domain parameters") - def set_mediator_deduplication_timeout( - domainId: DomainId, - timeout: config.NonNegativeFiniteDuration, - ): Unit = propose_update(domainId, _.update(mediatorDeduplicationTimeout = timeout)) - - @Help.Summary("Update the reconciliation interval in the dynamic domain parameters") - def set_reconciliation_interval( - domainId: DomainId, - interval: config.PositiveDurationSeconds, - ): Unit = propose_update(domainId, _.update(reconciliationInterval = interval)) - - @Help.Summary( - "Update the maximum rate of confirmation requests per participant in the dynamic domain parameters" - ) - def set_confirmation_requests_max_rate( - domainId: DomainId, - rate: NonNegativeInt, - ): Unit = propose_update(domainId, _.update(confirmationRequestsMaxRate = rate)) - - @Help.Summary("Update the maximum request size in the dynamic domain parameters") - @Help.Description( - """The update won't have any effect until the 
sequencers are restarted.""" - ) - def set_max_request_size( - domainId: DomainId, - size: NonNegativeInt, - ): Unit = propose_update(domainId, _.update(maxRequestSize = size)) - - @Help.Summary( - "Update the sequencer aggregate submission timeout in the dynamic domain parameters" - ) - def set_sequencer_aggregate_submission_timeout( - domainId: DomainId, - timeout: config.NonNegativeFiniteDuration, - ): Unit = - propose_update(domainId, _.update(sequencerAggregateSubmissionTimeout = timeout)) - - @Help.Summary( - "Update the `trafficControlParameters` in the dynamic domain parameters" - ) - def set_traffic_control_parameters( - domainId: DomainId, - trafficControlParameters: TrafficControlParameters, - ): Unit = propose_update( - domainId, - _.update(trafficControlParameters = Some(trafficControlParameters)), - ) - - @Help.Summary( - "Clear the traffic control parameters in the dynamic domain parameters" - ) - def clear_traffic_control_parameters(domainId: DomainId): Unit = - propose_update(domainId, _.update(trafficControlParameters = None)) } @Help.Summary("Inspect topology stores") diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala b/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala index 673377aec..e4114a7c1 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/environment/CommunityEnvironment.scala @@ -27,7 +27,6 @@ import com.digitalasset.canton.console.{ import com.digitalasset.canton.crypto.CommunityCryptoFactory import com.digitalasset.canton.crypto.admin.grpc.GrpcVaultService.CommunityGrpcVaultServiceFactory import com.digitalasset.canton.crypto.store.CryptoPrivateStore.CommunityCryptoPrivateStoreFactory -import com.digitalasset.canton.domain.admin.v30.SequencerPruningAdministrationServiceGrpc import com.digitalasset.canton.domain.mediator.* import com.digitalasset.canton.domain.metrics.MediatorMetrics import com.digitalasset.canton.domain.sequencing.SequencerNodeBootstrapX @@ -41,6 +40,7 @@ import com.digitalasset.canton.resource.{ CommunityStorageFactory, DbMigrationsFactory, } +import com.digitalasset.canton.sequencer.admin.v30.SequencerPruningAdministrationServiceGrpc class CommunityEnvironment( override val config: CantonCommunityConfig, diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala b/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala index 8c33d1a6c..839a1a350 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/environment/Environment.scala @@ -26,6 +26,7 @@ import com.digitalasset.canton.environment.Environment.* import com.digitalasset.canton.environment.ParticipantNodes.ParticipantNodesX import com.digitalasset.canton.lifecycle.Lifecycle import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.metrics.MetricsConfig.JvmMetrics import com.digitalasset.canton.metrics.MetricsRegistry import com.digitalasset.canton.participant.* import com.digitalasset.canton.resource.DbMigrationsFactory @@ -70,10 +71,12 @@ trait Environment extends NamedLogging with AutoCloseable with NoTracing { ) } + config.monitoring.metrics.jvmMetrics + .foreach(JvmMetrics.setup(_, configuredOpenTelemetry.openTelemetry)) + // 
public for buildDocs task to be able to construct a fake participant and domain to document available metrics via reflection lazy val metricsRegistry: MetricsRegistry = new MetricsRegistry( - config.monitoring.metrics.reportJvmMetrics, configuredOpenTelemetry.openTelemetry.meterBuilder("canton").build(), testingConfig.metricsFactoryType, ) diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala b/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala index 02855b42e..80f5d0739 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/environment/Nodes.scala @@ -11,10 +11,10 @@ import com.digitalasset.canton.DiscardOps import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService import com.digitalasset.canton.config.{DbConfig, LocalNodeConfig, ProcessingTimeout, StorageConfig} import com.digitalasset.canton.domain.mediator.{ + MediatorNode, MediatorNodeBootstrapX, MediatorNodeConfigCommon, MediatorNodeParameters, - MediatorNodeX, } import com.digitalasset.canton.domain.sequencing.config.{ SequencerNodeConfigCommon, @@ -147,6 +147,8 @@ class ManagedNodes[ ) .flatMap(startNode(name, _).map(_ => ())) + // TODO(#17726) Ratko, Thibault: The access to `nodes` in runStartup is not covered by the synchronized block. Are there concurrency issues here? + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) private def startNode( name: InstanceName, config: NodeConfig, @@ -423,7 +425,7 @@ class MediatorNodesX[MNC <: MediatorNodeConfigCommon]( loggerFactory: NamedLoggerFactory, )(implicit ec: ExecutionContext) extends ManagedNodes[ - MediatorNodeX, + MediatorNode, MNC, MediatorNodeParameters, MediatorNodeBootstrapX, diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/CsvReporter.scala b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/CsvReporter.scala index 00654c8c8..54fc97443 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/CsvReporter.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/CsvReporter.scala @@ -3,16 +3,16 @@ package com.digitalasset.canton.metrics -import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.tracing.NoTracing -import com.digitalasset.canton.util.DelayUtil import io.opentelemetry.sdk.common.CompletableResultCode +import io.opentelemetry.sdk.metrics.InstrumentType import io.opentelemetry.sdk.metrics.data.{AggregationTemporality, MetricData} -import io.opentelemetry.sdk.metrics.`export`.{MetricProducer, MetricReader, MetricReaderFactory} +import io.opentelemetry.sdk.metrics.`export`.MetricExporter import java.io.{BufferedWriter, File, FileWriter} +import java.util import java.util.concurrent.atomic.AtomicBoolean import scala.collection.concurrent.TrieMap import scala.concurrent.blocking @@ -20,111 +20,91 @@ import scala.jdk.CollectionConverters.* import scala.util.{Failure, Success, Try} class CsvReporter(config: MetricsReporterConfig.Csv, val loggerFactory: NamedLoggerFactory) - extends MetricReaderFactory + extends MetricExporter with NamedLogging with NoTracing { - private val directEC = new DirectExecutionContext(noTracingLogger) + private val running = new AtomicBoolean(true) + private val files = new 
TrieMap[String, (FileWriter, BufferedWriter)] + private val lock = new Object() - private def includeMetric(data: MetricData): Boolean = { - data.getName.nonEmpty && (config.filters.isEmpty || config.filters.exists( - _.matches(data.getName) - )) - } - - override def apply(producer: MetricProducer): MetricReader = new MetricReader { - - private val lock = new Object() - - private def updateAndSchedule(): Unit = if (running.get()) { - DelayUtil - .delay(config.interval.asFiniteApproximation) - .foreach { _ => - writeMetrics() - updateAndSchedule() - }(directEC) - } - - private val running = new AtomicBoolean(true) - private val files = new TrieMap[String, (FileWriter, BufferedWriter)] - updateAndSchedule() + def getAggregationTemporality(instrumentType: InstrumentType): AggregationTemporality = + AggregationTemporality.CUMULATIVE - override def getPreferredTemporality: AggregationTemporality = AggregationTemporality.CUMULATIVE - - override def flush(): CompletableResultCode = { - writeMetrics() - tryOrStop { - files.foreach { case (_, (_, bufferedWriter)) => - bufferedWriter.flush() - } + override def flush(): CompletableResultCode = { + tryOrStop { + files.foreach { case (_, (_, bufferedWriter)) => + bufferedWriter.flush() } - CompletableResultCode.ofSuccess() } + CompletableResultCode.ofSuccess() + } - override def shutdown(): CompletableResultCode = { - running.set(false) - tryOrStop { - files.foreach { case (_, (file, bufferedWriter)) => - bufferedWriter.close() - file.close() - } + override def shutdown(): CompletableResultCode = { + running.set(false) + tryOrStop { + files.foreach { case (_, (file, bufferedWriter)) => + bufferedWriter.close() + file.close() } - CompletableResultCode.ofSuccess() } + CompletableResultCode.ofSuccess() + } - private def writeMetrics(): Unit = blocking { + def `export`(metrics: util.Collection[MetricData]): CompletableResultCode = { + blocking { lock.synchronized { if (running.get()) { - val metrics = producer.collectAllMetrics() val ts = CantonTimestamp.now() - val filtered = metrics.asScala.filter(includeMetric).flatMap { data => + val converted = metrics.asScala.flatMap { data => MetricValue.fromMetricData(data).map { value => (value, data) } } - filtered.foreach { case (value, metadata) => writeRow(ts, value, metadata) } + converted.foreach { case (value, metadata) => writeRow(ts, value, metadata) } } } } + CompletableResultCode.ofSuccess() + } - private def tryOrStop(res: => Unit): Unit = { - Try(res) match { - case Success(_) => - case Failure(exception) => - logger.warn("Failed to write metrics to csv file. Turning myself off", exception) - running.set(false) - } + private def tryOrStop(res: => Unit): Unit = { + Try(res) match { + case Success(_) => + case Failure(exception) => + logger.warn("Failed to write metrics to csv file. 
Turning myself off", exception) + running.set(false) } + } - private def writeRow(ts: CantonTimestamp, value: MetricValue, data: MetricData): Unit = if ( - running.get() - ) { - val knownKeys = config.contextKeys.filter(key => value.attributes.contains(key)) - val prefix = knownKeys - .flatMap { key => - value.attributes.get(key).toList - } - .mkString(".") - val name = - ((if (prefix.isEmpty) Seq.empty else Seq(prefix)) ++ Seq(data.getName, "csv")).mkString(".") - tryOrStop { - val (_, bufferedWriter) = files.getOrElseUpdate( - name, { - val file = new File(config.directory, name) - logger.info( - s"Creating new csv file ${file} for metric using keys=${knownKeys} from attributes=${value.attributes.keys}" - ) - file.getParentFile.mkdirs() - val writer = new FileWriter(file, true) - val bufferedWriter = new BufferedWriter(writer) - if (file.length() == 0) { - bufferedWriter.append(value.toCsvHeader(data)) - bufferedWriter.newLine() - } - (writer, bufferedWriter) - }, - ) - bufferedWriter.append(value.toCsvRow(ts, data)) - bufferedWriter.newLine() + private def writeRow(ts: CantonTimestamp, value: MetricValue, data: MetricData): Unit = if ( + running.get() + ) { + val knownKeys = config.contextKeys.filter(key => value.attributes.contains(key)) + val prefix = knownKeys + .flatMap { key => + value.attributes.get(key).toList } + .mkString(".") + val name = + ((if (prefix.isEmpty) Seq.empty else Seq(prefix)) ++ Seq(data.getName, "csv")).mkString(".") + tryOrStop { + val (_, bufferedWriter) = files.getOrElseUpdate( + name, { + val file = new File(config.directory, name) + logger.info( + s"Creating new csv file ${file} for metric using keys=${knownKeys} from attributes=${value.attributes.keys}" + ) + file.getParentFile.mkdirs() + val writer = new FileWriter(file, true) + val bufferedWriter = new BufferedWriter(writer) + if (file.length() == 0) { + bufferedWriter.append(value.toCsvHeader(data)) + bufferedWriter.newLine() + } + (writer, bufferedWriter) + }, + ) + bufferedWriter.append(value.toCsvRow(ts, data)) + bufferedWriter.newLine() } } } diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/FilteringMetricsReader.scala b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/FilteringMetricsReader.scala new file mode 100644 index 000000000..9b9903baf --- /dev/null +++ b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/FilteringMetricsReader.scala @@ -0,0 +1,50 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.metrics + +import com.digitalasset.canton.metrics.MetricsConfig.MetricsFilterConfig +import io.opentelemetry.sdk.common.CompletableResultCode +import io.opentelemetry.sdk.metrics.InstrumentType +import io.opentelemetry.sdk.metrics.data.{AggregationTemporality, MetricData} +import io.opentelemetry.sdk.metrics.`export`.{CollectionRegistration, MetricReader} + +import java.util +import java.util.stream.Collectors +import scala.collection.concurrent.TrieMap + +class FilteringMetricsReader private (filters: Seq[MetricsFilterConfig], parent: MetricReader) + extends MetricReader { + + // cache the result of the filter for each metric name so we don't have to traverse lists all the time + private val computedFilters = TrieMap[String, Boolean]() + + private def includeMetric(data: MetricData): Boolean = { + if (data.getName.isEmpty) false + else if (filters.isEmpty) true + else computedFilters.getOrElseUpdate(data.getName, filters.exists(_.matches(data.getName))) + } + + override def register(registration: CollectionRegistration): Unit = + parent.register(new CollectionRegistration { + override def collectAllMetrics(): util.Collection[MetricData] = { + registration + .collectAllMetrics() + .stream() + .filter(includeMetric(_)) + .collect(Collectors.toList()) + } + }) + + override def forceFlush(): CompletableResultCode = parent.forceFlush() + + override def shutdown(): CompletableResultCode = parent.shutdown() + + override def getAggregationTemporality(instrumentType: InstrumentType): AggregationTemporality = + parent.getAggregationTemporality(instrumentType) +} + +object FilteringMetricsReader { + def create(filters: Seq[MetricsFilterConfig], parent: MetricReader): MetricReader = + if (filters.isEmpty) parent else new FilteringMetricsReader(filters, parent) +} diff --git a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala index ecf10a9d5..162fb5bad 100644 --- a/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala +++ b/community/app-base/src/main/scala/com/digitalasset/canton/metrics/MetricsRegistry.scala @@ -3,30 +3,34 @@ package com.digitalasset.canton.metrics +import com.daml.metrics.api.opentelemetry.Slf4jMetricExporter import com.daml.metrics.api.{MetricName, MetricsContext} import com.daml.metrics.grpc.DamlGrpcServerMetrics -import com.daml.metrics.{HealthMetrics as DMHealth, HistogramDefinition} -import com.digitalasset.canton.buildinfo.BuildInfo +import com.daml.metrics.{HealthMetrics, HistogramDefinition} import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.config.RequireTypes.Port import com.digitalasset.canton.domain.metrics.{MediatorMetrics, SequencerMetrics} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.metrics.CantonLabeledMetricsFactory.CantonOpenTelemetryMetricsFactory -import com.digitalasset.canton.metrics.MetricsConfig.MetricsFilterConfig -import com.digitalasset.canton.metrics.MetricsReporterConfig.{Csv, Prometheus} +import com.digitalasset.canton.metrics.MetricsConfig.{JvmMetrics, MetricsFilterConfig} +import com.digitalasset.canton.metrics.MetricsReporterConfig.{Csv, Logging, Prometheus} import com.digitalasset.canton.participant.metrics.ParticipantMetrics import com.digitalasset.canton.{DiscardOps, DomainAlias} import com.typesafe.scalalogging.LazyLogging 
+import io.opentelemetry.api.OpenTelemetry import io.opentelemetry.api.metrics.Meter import io.opentelemetry.exporter.prometheus.PrometheusHttpServer +import io.opentelemetry.instrumentation.runtimemetrics.java8.* import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder +import io.opentelemetry.sdk.metrics.`export`.{MetricExporter, MetricReader, PeriodicMetricReader} import java.io.File +import java.util.concurrent.ScheduledExecutorService import scala.collection.concurrent.TrieMap final case class MetricsConfig( reporters: Seq[MetricsReporterConfig] = Seq.empty, - reportJvmMetrics: Boolean = false, + jvmMetrics: Option[JvmMetrics] = None, histograms: Seq[HistogramDefinition] = Seq.empty, ) @@ -40,6 +44,29 @@ object MetricsConfig { def matches(name: String): Boolean = name.startsWith(startsWith) && name.contains(contains) && name.endsWith(endsWith) } + + /** Control and enable jvm metrics */ + final case class JvmMetrics( + enabled: Boolean = false, + classes: Boolean = true, + cpu: Boolean = true, + memoryPools: Boolean = true, + threads: Boolean = true, + gc: Boolean = true, + ) + + object JvmMetrics { + def setup(config: JvmMetrics, openTelemetry: OpenTelemetry): Unit = { + if (config.enabled) { + if (config.classes) Classes.registerObservers(openTelemetry).discard + if (config.cpu) Cpu.registerObservers(openTelemetry).discard + if (config.memoryPools) MemoryPools.registerObservers(openTelemetry).discard + if (config.threads) Threads.registerObservers(openTelemetry).discard + if (config.gc) GarbageCollector.registerObservers(openTelemetry).discard + } + } + } + } sealed trait MetricsReporterConfig { @@ -49,11 +76,11 @@ sealed trait MetricsReporterConfig { object MetricsReporterConfig { - final case class Prometheus(address: String = "localhost", port: Port = Port.tryCreate(9464)) - extends MetricsReporterConfig { - // TODO(#16647): ensure prometheus metrics are filtered - override def filters: Seq[MetricsFilterConfig] = Seq.empty - } + final case class Prometheus( + address: String = "localhost", + port: Port = Port.tryCreate(9464), + filters: Seq[MetricsFilterConfig] = Seq.empty, + ) extends MetricsReporterConfig /** CSV metrics reporter configuration * @@ -72,9 +99,21 @@ object MetricsReporterConfig { filters: Seq[MetricsFilterConfig] = Seq.empty, ) extends MetricsReporterConfig + /** Log metrics reporter configuration + * + * This reporter will log the metrics in the given interval + * + * @param interval how often to log the metrics + * @param filters which metrics to include + */ + final case class Logging( + interval: NonNegativeFiniteDuration = NonNegativeFiniteDuration.ofSeconds(30), + filters: Seq[MetricsFilterConfig] = Seq.empty, + logAsInfo: Boolean = true, + ) extends MetricsReporterConfig + } final case class MetricsRegistry( - reportJVMMetrics: Boolean, meter: Meter, factoryType: MetricsFactoryType, ) extends AutoCloseable @@ -84,13 +123,6 @@ final case class MetricsRegistry( private val sequencers = TrieMap[String, SequencerMetrics]() private val mediators = TrieMap[String, MediatorMetrics]() - // add default, system wide metrics to the metrics reporter - if (reportJVMMetrics) { - // TODO(#16647): re-enable jvm metrics - // registry.registerAll(new JvmMetricSet) // register Daml repo JvmMetricSet - // JvmMetricSet.registerObservers() // requires OpenTelemetry to have the global lib setup - } - def forParticipant(name: String): ParticipantMetrics = { participants.getOrElseUpdate( name, { @@ -118,7 +150,7 @@ final case class MetricsRegistry( MetricsRegistry.prefix, 
labeledMetricsFactory, new DamlGrpcServerMetrics(labeledMetricsFactory, "sequencer"), - new DMHealth(labeledMetricsFactory), + new HealthMetrics(labeledMetricsFactory), ) }, ) @@ -134,7 +166,7 @@ final case class MetricsRegistry( MetricsRegistry.prefix, labeledMetricsFactory, new DamlGrpcServerMetrics(labeledMetricsFactory, "mediator"), - new DMHealth(labeledMetricsFactory), + new HealthMetrics(labeledMetricsFactory), ) }, ) @@ -149,9 +181,7 @@ final case class MetricsRegistry( case MetricsFactoryType.External => new CantonOpenTelemetryMetricsFactory( meter, - globalMetricsContext = MetricsContext( - "canton_version" -> BuildInfo.version - ).merge(extraContext), + globalMetricsContext = extraContext, ) } } @@ -198,26 +228,50 @@ object MetricsRegistry extends LazyLogging { loggerFactory: NamedLoggerFactory, )( sdkMeterProviderBuilder: SdkMeterProviderBuilder - ): SdkMeterProviderBuilder = { + )(implicit scheduledExecutorService: ScheduledExecutorService): SdkMeterProviderBuilder = { if (config.reporters.isEmpty) { - logger.info("No metrics reporters configured. Not starting metrics collection.") + logger.info( + s"No metrics reporters configured. Not starting metrics collection." + ) } - config.reporters.foreach { - case Prometheus(hostname, port) => - logger.info(s"Exposing metrics for Prometheus on port $hostname:$port") - val prometheusServer = PrometheusHttpServer - .builder() - .setHost(hostname) - .setPort(port.unwrap) - .newMetricReaderFactory() - sdkMeterProviderBuilder.registerMetricReader(prometheusServer).discard - case config: Csv => - sdkMeterProviderBuilder - .registerMetricReader( - new CsvReporter(config, loggerFactory) + def buildPeriodicReader( + exporter: MetricExporter, + interval: NonNegativeFiniteDuration, + ): MetricReader = { + PeriodicMetricReader + .builder(exporter) + .setExecutor(scheduledExecutorService) + .setInterval(interval.asJava) + .build() + } + config.reporters + .map { + case Prometheus(hostname, port, _) => + logger.info(s"Exposing metrics for Prometheus on port $hostname:$port") + PrometheusHttpServer + .builder() + .setHost(hostname) + .setPort(port.unwrap) + .build() + case config: Csv => + buildPeriodicReader(new CsvReporter(config, loggerFactory), config.interval) + case config: Logging => + // TODO(#17917) fix upstream slfjmetricexporer + buildPeriodicReader( + new Slf4jMetricExporter( + logAsInfo = config.logAsInfo, + logger = loggerFactory.getLogger(MetricsRegistry.getClass).underlying, + ), + config.interval, ) + + } + .zip(config.reporters) + .foreach { case (reader, config) => + sdkMeterProviderBuilder + .registerMetricReader(FilteringMetricsReader.create(config.filters, reader)) .discard - } + } sdkMeterProviderBuilder } diff --git a/community/app/src/pack/config/monitoring/prometheus.conf b/community/app/src/pack/config/monitoring/prometheus.conf index 4ba1cefc1..240588d9f 100644 --- a/community/app/src/pack/config/monitoring/prometheus.conf +++ b/community/app/src/pack/config/monitoring/prometheus.conf @@ -1,5 +1,5 @@ canton.monitoring.metrics { - report-jvm-metrics = yes + jvm-metrics.enabled = yes reporters = [{ type = prometheus address = 0.0.0.0 diff --git a/community/app/src/pack/config/monitoring/tracing.conf b/community/app/src/pack/config/monitoring/tracing.conf index 84731b414..3dd692927 100644 --- a/community/app/src/pack/config/monitoring/tracing.conf +++ b/community/app/src/pack/config/monitoring/tracing.conf @@ -1,6 +1,6 @@ canton.monitoring.tracing.tracer.exporter = { - // zipkin or otlp are alternatives - type = jaeger + // 
zipkin is an alternative + type = otlp address = 169.254.0.0 port = 14250 } diff --git a/community/app/src/pack/examples/01-simple-topology/README.md b/community/app/src/pack/examples/01-simple-topology/README.md index 075f7125b..eb0d66b34 100644 --- a/community/app/src/pack/examples/01-simple-topology/README.md +++ b/community/app/src/pack/examples/01-simple-topology/README.md @@ -12,5 +12,5 @@ and test the connection. The simple topology example can be invoked using ``` - ../../bin/canton -c simple-topology-x.conf --bootstrap simple-ping.canton + ../../bin/canton -c simple-topology.conf --bootstrap simple-ping.canton ``` diff --git a/community/app/src/pack/examples/01-simple-topology/simple-topology-x-2.conf b/community/app/src/pack/examples/01-simple-topology/simple-topology-x-2.conf deleted file mode 100644 index aff5ae08c..000000000 --- a/community/app/src/pack/examples/01-simple-topology/simple-topology-x-2.conf +++ /dev/null @@ -1,13 +0,0 @@ -canton { - - features.enable-testing-commands = yes - features.enable-preview-commands = yes - - sequencers { - sequencer2 { } - } - - mediators { - mediator2 { } - } -} diff --git a/community/app/src/pack/examples/01-simple-topology/simple-topology-x.conf b/community/app/src/pack/examples/01-simple-topology/simple-topology.conf similarity index 100% rename from community/app/src/pack/examples/01-simple-topology/simple-topology-x.conf rename to community/app/src/pack/examples/01-simple-topology/simple-topology.conf diff --git a/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton b/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton index 9fa6b4f1d..ebcb12cd7 100644 --- a/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton +++ b/community/app/src/pack/examples/05-composability/composability-auto-transfer.canton @@ -23,9 +23,9 @@ val paintId = bootstrap.domain( // update parameters // disable automatic assignments iouDomainOwner.topology.domain_parameters - .set_transfer_exclusivity_timeout(iouId, 0 seconds) + .propose_update(iouId, _.update(transferExclusivityTimeout = 0 seconds)) paintDomainOwner.topology.domain_parameters - .set_transfer_exclusivity_timeout(paintId, 2 seconds) + .propose_update(paintId, _.update(transferExclusivityTimeout = 2 seconds)) // connect participants to the domains participant1.domains.connect_local(iou_sequencer, alias = iouAlias) diff --git a/community/app/src/pack/examples/05-composability/composability1.canton b/community/app/src/pack/examples/05-composability/composability1.canton index 6550c4b1f..f75a2a56e 100644 --- a/community/app/src/pack/examples/05-composability/composability1.canton +++ b/community/app/src/pack/examples/05-composability/composability1.canton @@ -25,9 +25,9 @@ val paintId = bootstrap.domain( // update parameters // disable automatic assignments iouDomainOwner.topology.domain_parameters - .set_transfer_exclusivity_timeout(iouId, 0 seconds) + .propose_update(iouId, _.update(transferExclusivityTimeout = 0 seconds)) paintDomainOwner.topology.domain_parameters - .set_transfer_exclusivity_timeout(paintId, 2 seconds) + .propose_update(paintId, _.update(transferExclusivityTimeout = 2 seconds)) // connect participants to the domains participant1.domains.connect_local(iou_sequencer, alias = iouAlias) diff --git a/community/app/src/pack/examples/05-composability/composability2.canton b/community/app/src/pack/examples/05-composability/composability2.canton index 50b6a9b4e..bc14c076e 100644 --- 
a/community/app/src/pack/examples/05-composability/composability2.canton +++ b/community/app/src/pack/examples/05-composability/composability2.canton @@ -24,9 +24,9 @@ val paintId = bootstrap.domain( // update parameters // disable automatic assignments iouDomainOwner.topology.domain_parameters - .set_transfer_exclusivity_timeout(iouId, 0 seconds) + .propose_update(iouId, _.update(transferExclusivityTimeout = 0 seconds)) paintDomainOwner.topology.domain_parameters - .set_transfer_exclusivity_timeout(paintId, 2 seconds) + .propose_update(paintId, _.update(transferExclusivityTimeout = 2 seconds)) // connect participants to the domains diff --git a/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala b/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala index bc95c6a0f..927d08d26 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/config/CantonCommunityConfigTest.scala @@ -21,11 +21,11 @@ import org.scalatest.wordspec.AnyWordSpec class CantonCommunityConfigTest extends AnyWordSpec with BaseTest { import scala.jdk.CollectionConverters.* - private val simpleConf = "examples/01-simple-topology/simple-topology-x.conf" + private val simpleConf = "examples/01-simple-topology/simple-topology.conf" "the example simple topology configuration" should { lazy val config = - loadFile(simpleConf).valueOrFail("failed to load simple-topology-x.conf") + loadFile(simpleConf).valueOrFail("failed to load simple-topology.conf") "contain a couple of participants" in { config.participants should have size 2 diff --git a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala index 9a45e8435..078713443 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/EnterpriseFeatureInCommunityIntegrationTest.scala @@ -25,7 +25,7 @@ sealed trait EnterpriseFeatureInCommunityXIntegrationTest private val domainAlias = "da" override def environmentDefinition: CommunityEnvironmentDefinition = - CommunityEnvironmentDefinition.simpleTopologyX + CommunityEnvironmentDefinition.simpleTopology .addConfigTransforms( CommunityConfigTransforms.uniquePorts ) diff --git a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala index cf79e6117..f9d2dae71 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/ExampleIntegrationTest.scala @@ -83,7 +83,7 @@ object ExampleIntegrationTest { } sealed abstract class SimplePingExampleXIntegrationTest - extends ExampleIntegrationTest(simpleTopology / "simple-topology-x.conf") { + extends ExampleIntegrationTest(simpleTopology / "simple-topology.conf") { "run simple-ping.canton successfully" in { implicit env => import env.* diff --git a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingXCommunityIntegrationTest.scala 
b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingXCommunityIntegrationTest.scala index 74a7f728a..87f91714a 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingXCommunityIntegrationTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/SimplestPingXCommunityIntegrationTest.scala @@ -21,7 +21,7 @@ sealed trait SimplestPingXCommunityIntegrationTest with SharedCommunityEnvironment { override def environmentDefinition: CommunityEnvironmentDefinition = - CommunityEnvironmentDefinition.simpleTopologyX + CommunityEnvironmentDefinition.simpleTopology .addConfigTransforms(CommunityConfigTransforms.uniquePorts) .withManualStart diff --git a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala index 6a179fb24..f0f2057d1 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/integration/tests/release/CliIntegrationTest.scala @@ -34,7 +34,7 @@ class CliXIntegrationTest extends FixtureAnyWordSpec with BaseTest with SuiteMix s"$resourceDir/config-snippets/disable-ammonite-cache.conf" private lazy val simpleConf = - "community/app/src/pack/examples/01-simple-topology/simple-topology-x.conf" + "community/app/src/pack/examples/01-simple-topology/simple-topology.conf" private lazy val unsupportedProtocolVersionConfig = "enterprise/app/src/test/resources/unsupported-protocol-version.conf" // this warning is potentially thrown when starting Canton with --no-tty @@ -200,7 +200,7 @@ class CliXIntegrationTest extends FixtureAnyWordSpec with BaseTest with SuiteMix val basicCommand = { // user-manual-entry-begin: SetNumThreads - "bin/canton -Dscala.concurrent.context.numThreads=12 --config examples/01-simple-topology/simple-topology-x.conf" + "bin/canton -Dscala.concurrent.context.numThreads=12 --config examples/01-simple-topology/simple-topology.conf" // user-manual-entry-end: SetNumThreads } val cmd = basicCommand + " --no-tty" diff --git a/community/app/src/test/scala/com/digitalasset/canton/metrics/LabeledMetricsFactoryTest.scala b/community/app/src/test/scala/com/digitalasset/canton/metrics/LabeledMetricsFactoryTest.scala index 145142309..ad15e38a1 100644 --- a/community/app/src/test/scala/com/digitalasset/canton/metrics/LabeledMetricsFactoryTest.scala +++ b/community/app/src/test/scala/com/digitalasset/canton/metrics/LabeledMetricsFactoryTest.scala @@ -12,7 +12,6 @@ class LabeledMetricsFactoryTest extends AnyWordSpec with BaseTest { "metrics factory" should { "generate valid documentation" in { val mf = MetricsRegistry( - false, OpenTelemetry.noop().getMeter("test"), MetricsFactoryType.InMemory(_ => new InMemoryMetricsFactory), ) diff --git a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto index dd75cbf1d..01552be37 100644 --- a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto +++ b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/domain_parameters.proto @@ -78,4 +78,5 @@ message DynamicDomainParameters { google.protobuf.Duration sequencer_aggregate_submission_timeout = 15; 
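The hunk here extends `DynamicDomainParameters` with an optional ACS commitment catch-up configuration, added as field 17 just below. On the Scala side, later in this patch, an absent field decodes to `None` and a present one is converted and validated via `traverse`. A minimal sketch of that optional-field decoding pattern, using hypothetical stand-in types rather than the real `AcsCommitmentsCatchUpConfig` classes:

```scala
// Sketch only: CatchUpConfigProto/CatchUpConfig are stand-ins, not the real protobuf classes.
import cats.syntax.traverse.*

final case class CatchUpConfigProto(skipIntervals: Int, triggerIntervals: Int)
final case class CatchUpConfig(skipIntervals: Int, triggerIntervals: Int)

def fromProto(p: CatchUpConfigProto): Either[String, CatchUpConfig] =
  if (p.skipIntervals > 0 && p.triggerIntervals > 0)
    Right(CatchUpConfig(p.skipIntervals, p.triggerIntervals))
  else Left("catch-up parameters must be positive")

// An unset optional field stays None; a set field is converted and validated.
def decodeCatchUp(field: Option[CatchUpConfigProto]): Either[String, Option[CatchUpConfig]] =
  field.traverse(fromProto)
```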
com.digitalasset.canton.protocol.v30.TrafficControlParameters traffic_control_parameters = 16; + AcsCommitmentsCatchUpConfig acs_commitments_catchup_config = 17; } diff --git a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto index 966731c1a..b562f94c9 100644 --- a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto +++ b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/sequencing.proto @@ -7,7 +7,6 @@ package com.digitalasset.canton.protocol.v30; import "com/digitalasset/canton/crypto/v30/crypto.proto"; import "com/digitalasset/canton/domain/api/v30/sequencer_service.proto"; -import "com/digitalasset/canton/protocol/v30/domain_parameters.proto"; import "com/digitalasset/canton/protocol/v30/signed_content.proto"; import "com/digitalasset/canton/v30/trace_context.proto"; import "google/protobuf/wrappers.proto"; @@ -83,7 +82,6 @@ message StaticDomainParameters { repeated com.digitalasset.canton.crypto.v30.HashAlgorithm required_hash_algorithms = 4; repeated com.digitalasset.canton.crypto.v30.CryptoKeyFormat required_crypto_key_formats = 5; int32 protocol_version = 6; - AcsCommitmentsCatchUpConfig acs_commitments_catch_up = 7; } message Envelope { diff --git a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto index 4744836ab..030f653cb 100644 --- a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto +++ b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/topology.proto @@ -12,11 +12,13 @@ import "scalapb/scalapb.proto"; message Enums { enum TopologyChangeOp { + TOPOLOGY_CHANGE_OP_UNSPECIFIED = 0; + // Adds a new or replaces an existing mapping - TOPOLOGY_CHANGE_OP_REPLACE_UNSPECIFIED = 0; + TOPOLOGY_CHANGE_OP_ADD_REPLACE = 1; // Remove an existing mapping - TOPOLOGY_CHANGE_OP_REMOVE = 1; + TOPOLOGY_CHANGE_OP_REMOVE = 2; } // enum indicating the participant permission level diff --git a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto index 76c67be64..73a34b5a2 100644 --- a/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto +++ b/community/base/src/main/protobuf/com/digitalasset/canton/protocol/v30/traffic_control_parameters.proto @@ -23,8 +23,10 @@ message TrafficControlParameters { // Window size used to compute the max sequencing time of a submission request // This impacts how quickly a submission is expected to be accepted before a retry should be attempted by the caller - // Default is 5 minutes google.protobuf.Duration set_balance_request_submission_window_size = 5; + + // If true, submission requests without enough traffic credit will not be delivered + bool enforce_rate_limiting = 6; } message SetTrafficBalanceMessage { diff --git a/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala b/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala index f9257bcaf..b750a33a1 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/ProtoDeserializationError.scala @@ -48,7 
+48,6 @@ object ProtoDeserializationError extends ProtoDeserializationErrorGroup { override val message = s"Field `$field` is not set" } final case class TimestampConversionError(message: String) extends ProtoDeserializationError - final case class TimeModelConversionError(message: String) extends ProtoDeserializationError final case class ValueConversionError(field: String, error: String) extends ProtoDeserializationError { override val message = s"Unable to convert field `$field`: $error" diff --git a/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala b/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala index c1daa9caf..f809da592 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/config/ProcessingTimeouts.scala @@ -74,8 +74,6 @@ object DefaultProcessingTimeouts { val activeInitRetryDelay: NonNegativeDuration = NonNegativeDuration.tryFromDuration(50.millis) - val warnUnbounded: NonNegativeDuration = NonNegativeDuration.tryFromDuration(30.seconds) - val slowFutureWarn: NonNegativeDuration = NonNegativeDuration.tryFromDuration(5.seconds) val sequencerInfo: NonNegativeDuration = NonNegativeDuration.tryFromDuration(30.seconds) diff --git a/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala b/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala index 36c685c41..70491009f 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/config/ServerConfig.scala @@ -223,8 +223,6 @@ sealed trait BaseTlsArguments { * optional or unsupported. * If client authentication is enabled and this parameter is absent, * the certificates in the JVM trust store will be used instead. - * @param secretsUrl URL of a secrets service that provide parameters needed to decrypt the private key. - * Required when private key is encrypted (indicated by '.enc' filename suffix). * @param clientAuth indicates whether server requires, requests, does does not request auth from clients. * Normally the ledger api server requires client auth under TLS, but using this setting this * requirement can be loosened. 
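With the `secretsUrl` option removed from `TlsServerConfig` in the hunk below (and the corresponding rejection dropped from `CantonServerBuilder` further down), the admin-service TLS settings reduce to plain certificate and key files plus a client-auth requirement; an encrypted private key (an `.enc` file) can no longer be decrypted through a secrets service. A minimal sketch of that reduced shape, using hypothetical stand-in types rather than the real config classes:

```scala
// Sketch only: AdminTls and ClientAuthRequirement are stand-ins for the real config types.
import java.nio.file.{Files, Path, Paths}

object AdminTlsSketch {
  sealed trait ClientAuthRequirement
  case object AuthNone extends ClientAuthRequirement
  case object AuthOptional extends ClientAuthRequirement
  case object AuthRequired extends ClientAuthRequirement

  final case class AdminTls(
      certChain: Path,
      privateKey: Path,                     // must be a plain, unencrypted key file
      trustCollection: Option[Path] = None, // absent => JVM trust store is used
      clientAuth: ClientAuthRequirement = AuthOptional,
  )

  def validate(tls: AdminTls): Either[String, AdminTls] =
    if (!Files.exists(tls.privateKey)) Left(s"missing private key: ${tls.privateKey}")
    else if (tls.privateKey.toString.endsWith(".enc"))
      Left("encrypted private keys are not supported without a secrets service")
    else Right(tls)

  // usage sketch
  val example: Either[String, AdminTls] =
    validate(AdminTls(Paths.get("tls/chain.pem"), Paths.get("tls/key.pem")))
}
```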
@@ -241,7 +239,6 @@ final case class TlsServerConfig( certChainFile: ExistingFile, privateKeyFile: ExistingFile, trustCollectionFile: Option[ExistingFile] = None, - secretsUrl: Option[String] = None, clientAuth: ServerAuthRequirementConfig = ServerAuthRequirementConfig.Optional, minimumServerProtocolVersion: Option[String] = Some( TlsServerConfig.defaultMinimumServerProtocol diff --git a/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala b/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala index 48216a044..c94ce27ff 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/crypto/CryptoApi.scala @@ -68,7 +68,8 @@ class Crypto( ) } yield publicKey - override def onClosed(): Unit = Lifecycle.close(cryptoPrivateStore, cryptoPublicStore)(logger) + override def onClosed(): Unit = + Lifecycle.close(privateCrypto, cryptoPrivateStore, cryptoPublicStore)(logger) } trait CryptoPureApi @@ -89,7 +90,7 @@ object CryptoPureApiError { } } -trait CryptoPrivateApi extends EncryptionPrivateOps with SigningPrivateOps +trait CryptoPrivateApi extends EncryptionPrivateOps with SigningPrivateOps with AutoCloseable trait CryptoPrivateStoreApi extends CryptoPrivateApi with EncryptionPrivateStoreOps diff --git a/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala b/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala index 4b7472346..c1667b65a 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/crypto/Encryption.scala @@ -143,7 +143,7 @@ trait EncryptionPrivateStoreOps extends EncryptionPrivateOps { deserialize: ByteString => Either[DeserializationError, M] )(implicit tc: TraceContext): EitherT[Future, DecryptionError, M] = store - .decryptionKey(encryptedMessage.encryptedFor)(TraceContext.todo) + .decryptionKey(encryptedMessage.encryptedFor) .leftMap(storeError => DecryptionError.KeyStoreError(storeError.show)) .subflatMap(_.toRight(DecryptionError.UnknownEncryptionKey(encryptedMessage.encryptedFor))) .subflatMap(encryptionKey => diff --git a/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala b/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala index 4a08c7387..8430902ae 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/data/RepairContract.scala @@ -3,18 +3,18 @@ package com.digitalasset.canton.data -import com.digitalasset.canton.TransferCounterO +import com.digitalasset.canton.TransferCounter import com.digitalasset.canton.protocol.SerializableContract import com.digitalasset.canton.topology.PartyId /** Serializable contract with witnesses for contract add/import used in admin repairs. 
* * @param contract serializable contract - * @param witnesses optional witnesses that observe the creation of the contract - * @param transferCounter optional reassignment counter for the given [[contract]] + * @param witnesses witnesses that observe the creation of the contract + * @param transferCounter reassignment counter for the given [[contract]] */ final case class RepairContract( contract: SerializableContract, witnesses: Set[PartyId], - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) diff --git a/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala b/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala index fb1fcb106..13cd2e31e 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/data/TransferInViewTree.scala @@ -27,7 +27,7 @@ import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.util.EitherUtil import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} import com.digitalasset.canton.version.* -import com.digitalasset.canton.{LfPartyId, LfWorkflowId, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, LfWorkflowId, TransferCounter} import com.google.protobuf.ByteString import java.util.UUID @@ -248,8 +248,7 @@ final case class TransferInView private ( creatingTransactionId: TransactionId, transferOutResultEvent: DeliveredTransferOutResult, sourceProtocolVersion: SourceProtocolVersion, - // TODO(#15179) Remove the option - transferCounter: TransferCounterO, + transferCounter: TransferCounter, )( hashOps: HashOps, override val representativeProtocolVersion: RepresentativeProtocolVersion[TransferInView.type], @@ -269,13 +268,7 @@ final case class TransferInView private ( creatingTransactionId = creatingTransactionId.toProtoPrimitive, transferOutResultEvent = Some(transferOutResultEvent.result.toProtoV30), sourceProtocolVersion = sourceProtocolVersion.v.toProtoPrimitive, - transferCounter = transferCounter - .getOrElse( - throw new IllegalStateException( - s"Transfer counter must be defined at representative protocol version $representativeProtocolVersion" - ) - ) - .toProtoPrimitive, + transferCounter = transferCounter.toProtoPrimitive, ) override protected[this] def toByteStringUnmemoized: ByteString = @@ -283,7 +276,7 @@ final case class TransferInView private ( override def pretty: Pretty[TransferInView] = prettyOfClass( param("contract", _.contract), // TODO(#3269) this may contain confidential data - paramIfDefined("transfer counter", _.transferCounter), + param("transfer counter", _.transferCounter), param("creating transaction id", _.creatingTransactionId), param("transfer out result", _.transferOutResultEvent), param("salt", _.salt), @@ -347,7 +340,7 @@ object TransferInView transferOutResultEvent: DeliveredTransferOutResult, sourceProtocolVersion: SourceProtocolVersion, targetProtocolVersion: TargetProtocolVersion, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ): Either[String, TransferInView] = Either .catchOnly[IllegalArgumentException]( TransferInView( @@ -392,7 +385,7 @@ object TransferInView commonData.creatingTransactionId, commonData.transferOutResultEvent, commonData.sourceProtocolVersion, - Some(TransferCounter(transferCounterP)), + TransferCounter(transferCounterP), )(hashOps, rpv, Some(bytes)) } } @@ -420,7 +413,7 @@ final case class 
FullTransferInTree(tree: TransferInViewTree) def contract: SerializableContract = view.contract - def transferCounter: TransferCounterO = view.transferCounter + def transferCounter: TransferCounter = view.transferCounter def creatingTransactionId: TransactionId = view.creatingTransactionId diff --git a/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala b/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala index 80ec9b35a..524800358 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/data/TransferOutViewTree.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.util.EitherUtil import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} import com.digitalasset.canton.version.* -import com.digitalasset.canton.{LfPartyId, LfWorkflowId, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, LfWorkflowId, TransferCounter} import com.google.protobuf.ByteString import java.util.UUID @@ -339,7 +339,7 @@ object TransferOutView targetTimeProof: TimeProof, sourceProtocolVersion: SourceProtocolVersion, targetProtocolVersion: TargetProtocolVersion, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ): TransferOutView = TransferOutView( salt, @@ -348,7 +348,7 @@ object TransferOutView targetDomain, targetTimeProof, targetProtocolVersion, - transferCounter.getOrElse(throw new IllegalArgumentException("Missing transfer counter.")), + transferCounter, )(hashOps, protocolVersionRepresentativeFor(sourceProtocolVersion.v), None) private[this] def fromProtoV30(hashOps: HashOps, transferOutViewP: v30.TransferOutView)( diff --git a/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala b/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala index bc625b52a..7bd7460ba 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/health/ComponentHealthState.scala @@ -23,6 +23,10 @@ sealed trait ComponentHealthState extends ToComponentHealthState with PrettyPrin case ComponentHealthState.Ok(_) => true case _ => false } + def isDegrading: Boolean = this match { + case ComponentHealthState.Degraded(_) => true + case _ => false + } def isFailed: Boolean = this match { case ComponentHealthState.Failed(_) => true case _ => false diff --git a/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala b/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala index 41066e144..700d32142 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/health/HealthComponent.scala @@ -13,6 +13,8 @@ trait HealthQuasiComponent extends HealthElement { override protected def prettyState: Pretty[State] = Pretty[State] def isFailed: Boolean = getState.toComponentHealthState.isFailed + def isOk: Boolean = getState.toComponentHealthState.isOk + def isDegrading: Boolean = getState.toComponentHealthState.isDegrading def toComponentStatus: ComponentStatus = ComponentStatus(name, getState.toComponentHealthState) override def closingState: State diff --git 
a/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala b/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala index 20ef9d86c..719d5eeaf 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/lifecycle/FutureUnlessShutdown.scala @@ -11,7 +11,11 @@ import com.daml.metrics.api.MetricHandle.Timer import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.canton.util.{LoggerUtil, Thereafter} -import com.digitalasset.canton.{DoNotDiscardLikeFuture, DoNotTraverseLikeFuture} +import com.digitalasset.canton.{ + DoNotDiscardLikeFuture, + DoNotReturnFromSynchronizedLikeFuture, + DoNotTraverseLikeFuture, +} import scala.concurrent.{Awaitable, ExecutionContext, Future} import scala.util.chaining.* @@ -85,6 +89,7 @@ sealed abstract class FutureUnlessShutdownImpl { */ @DoNotDiscardLikeFuture @DoNotTraverseLikeFuture + @DoNotReturnFromSynchronizedLikeFuture type FutureUnlessShutdown[+A] <: Awaitable[UnlessShutdown[A]] /** Methods to evidence that [[FutureUnlessShutdown]] and [[scala.concurrent.Future]]`[`[[UnlessShutdown]]`]` diff --git a/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricValue.scala b/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricValue.scala index 4f10311af..3ca3b9e0b 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricValue.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/metrics/MetricValue.scala @@ -49,8 +49,8 @@ object MetricValue { import scala.jdk.CollectionConverters.* - implicit val prettyValueAtPercentile: Pretty[ValueAtPercentile] = prettyOfClass( - param("percentile", _.getPercentile.toString.unquoted), + implicit val prettyValueAtPercentile: Pretty[ValueAtQuantile] = prettyOfClass( + param("percentile", _.getQuantile.toString.unquoted), param("value", _.getValue.toString.unquoted), ) @@ -102,13 +102,13 @@ object MetricValue { final case class Summary( sum: Double, count: Long, - percentiles: Seq[ValueAtPercentile], + quantiles: Seq[ValueAtQuantile], attributes: Map[String, String], ) extends MetricValue { override def pretty: Pretty[Summary] = prettyOfClass( param("sum", _.sum.toString.unquoted), param("count", _.count), - param("percentiles", _.percentiles), + param("quantiles", _.quantiles), param( "attributes", _.attributes, @@ -116,12 +116,12 @@ object MetricValue { ) override def toCsvHeader(data: MetricData): String = { - (Seq("timestamp", "sum", "count") ++ percentiles.map(_.getPercentile).map(x => s"p$x%2.0f")) + (Seq("timestamp", "sum", "count") ++ quantiles.map(_.getQuantile).map(x => s"p$x%2.0f")) .mkString(",") } override def toCsvRow(ts: CantonTimestamp, data: MetricData): String = { - (Seq(ts.getEpochSecond.toString, sum.toString, count.toString) ++ percentiles.map( + (Seq(ts.getEpochSecond.toString, sum.toString, count.toString) ++ quantiles.map( _.getValue.toString )) .mkString(",") @@ -193,7 +193,7 @@ object MetricValue { Summary( data.getSum, data.getCount, - data.getPercentileValues.asScala.toSeq, + data.getValues.asScala.toSeq, mapAttributes(data.getAttributes), ) } diff --git a/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala b/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala index 2fc1b4fb8..0350b4eeb 100644 --- 
a/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/metrics/SequencerClientMetrics.scala @@ -50,7 +50,7 @@ class SequencerClientMetrics( |too slow to keep up with the messaging load.""", qualification = Debug, ) - val delay: Gauge[Long] = metricsFactory.gauge(prefix :+ "delay", 0L)(MetricsContext.Empty) + val delay: Gauge[Long] = metricsFactory.gauge(prefix :+ "delay", 0L) @MetricDoc.Tag( summary = @@ -89,7 +89,7 @@ class SequencerClientMetrics( qualification = Saturation, ) val maxInFlightEventBatches: Gauge[Int] = - metricsFactory.gauge(prefix :+ "max-in-flight-event-batches", 0)(MetricsContext.Empty) + metricsFactory.gauge(prefix :+ "max-in-flight-event-batches", 0) } object submissions { diff --git a/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala b/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala index f86ada931..403518688 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/networking/Endpoint.scala @@ -21,6 +21,10 @@ final case class Endpoint(host: String, port: Port) { } object Endpoint { + + implicit val endpointOrdering: Ordering[Endpoint] = + Ordering.by(_.toString) + val ATTR_ENDPOINT: Attributes.Key[Endpoint] = Attributes.Key.create("com.digitalasset.canton.networking.Endpoint") diff --git a/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala b/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala index 0501be9a4..6d72e25ee 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonGrpcUtil.scala @@ -61,7 +61,7 @@ object CantonGrpcUtil { ): EitherT[Future, StatusRuntimeException, C] = value.leftMap(_.asGrpcError) - def mapErrNewETUS[T <: CantonError, C](value: EitherT[FutureUnlessShutdown, T, C])(implicit + def mapErrNewETUS[T <: BaseCantonError, C](value: EitherT[FutureUnlessShutdown, T, C])(implicit ec: ExecutionContext, errorLoggingContext: ErrorLoggingContext, ): EitherT[Future, StatusRuntimeException, C] = @@ -78,7 +78,7 @@ object CantonGrpcUtil { ): Future[C] = EitherTUtil.toFuture(value.leftMap(_.asGrpcError)) - def mapErrNewEUS[T <: CantonError, C](value: EitherT[FutureUnlessShutdown, T, C])(implicit + def mapErrNewEUS[T <: BaseCantonError, C](value: EitherT[FutureUnlessShutdown, T, C])(implicit ec: ExecutionContext, errorLoggingContext: ErrorLoggingContext, ): Future[C] = diff --git a/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala b/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala index fa964fb5f..5855557fc 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/networking/grpc/CantonServerBuilder.scala @@ -185,13 +185,6 @@ object CantonServerBuilder { def baseSslContext(config: TlsBaseServerConfig): SslContext = baseSslBuilder(config).build() def sslContext(config: TlsServerConfig): SslContext = { - // TODO(#7086): secrets service support not yet implemented for canton admin services - config.secretsUrl.foreach { url => - throw new IllegalArgumentException( - s"Canton admin services do not yet support 
'Secrets Service' $url." - ) - } - val s1 = baseSslBuilder(config) val s2 = config.trustCollectionFile.fold(s1)(trustCollection => s1.trustManager(trustCollection.unwrap) diff --git a/community/base/src/main/scala/com/digitalasset/canton/package.scala b/community/base/src/main/scala/com/digitalasset/canton/package.scala index 7222be5c9..88c7de23c 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/package.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/package.scala @@ -119,10 +119,6 @@ package object canton { ) } - /** A transfer counter. - */ - type TransferCounterO = Option[TransferCounter] - object RequestCounter extends CounterCompanion[RequestCounterDiscriminator] /** Wrap a method call with this method to document that the caller is sure that the callee's preconditions are met. */ diff --git a/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala b/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala index aa2ea57cc..630435267 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/protocol/DomainParameters.scala @@ -72,7 +72,6 @@ final case class StaticDomainParameters private ( requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]], requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]], protocolVersion: ProtocolVersion, - acsCommitmentsCatchUp: Option[AcsCommitmentsCatchUpConfig], ) extends HasProtocolVersionedWrapper[StaticDomainParameters] { override val representativeProtocolVersion: RepresentativeProtocolVersion[ @@ -93,12 +92,14 @@ final case class StaticDomainParameters private ( requiredHashAlgorithms = requiredHashAlgorithms.toSeq.map(_.toProtoEnum), requiredCryptoKeyFormats = requiredCryptoKeyFormats.toSeq.map(_.toProtoEnum), protocolVersion = protocolVersion.toProtoPrimitive, - acsCommitmentsCatchUp = acsCommitmentsCatchUp.map(_.toProtoV30), ) } object StaticDomainParameters extends HasProtocolVersionedCompanion[StaticDomainParameters] with ProtocolVersionedCompanionDbHelpers[StaticDomainParameters] { + + // Note: if you need static domain parameters for testing, look at BaseTest.defaultStaticDomainParametersWith + val supportedProtoVersions: protocol.StaticDomainParameters.SupportedProtoVersions = SupportedProtoVersions( ProtoVersion(30) -> VersionedProtoConverter(ProtocolVersion.v30)( @@ -118,7 +119,6 @@ object StaticDomainParameters requiredHashAlgorithms: NonEmpty[Set[HashAlgorithm]], requiredCryptoKeyFormats: NonEmpty[Set[CryptoKeyFormat]], protocolVersion: ProtocolVersion, - acsCommitmentsCatchUp: Option[AcsCommitmentsCatchUpConfig], ): StaticDomainParameters = StaticDomainParameters( requiredSigningKeySchemes = requiredSigningKeySchemes, requiredEncryptionKeySchemes = requiredEncryptionKeySchemes, @@ -126,7 +126,6 @@ object StaticDomainParameters requiredHashAlgorithms = requiredHashAlgorithms, requiredCryptoKeyFormats = requiredCryptoKeyFormats, protocolVersion = protocolVersion, - acsCommitmentsCatchUp = acsCommitmentsCatchUp, ) private def requiredKeySchemes[P, A]( @@ -146,7 +145,6 @@ object StaticDomainParameters requiredHashAlgorithmsP, requiredCryptoKeyFormatsP, protocolVersionP, - acsCommitmentsCatchUpP, ) = domainParametersP for { @@ -176,9 +174,6 @@ object StaticDomainParameters CryptoKeyFormat.fromProtoEnum, ) protocolVersion <- ProtocolVersion.fromProtoPrimitive(protocolVersionP) - acsCommitmentsCatchUp <- acsCommitmentsCatchUpP.traverse( - 
AcsCommitmentsCatchUpConfig.fromProtoV30 - ) } yield StaticDomainParameters( requiredSigningKeySchemes, requiredEncryptionKeySchemes, @@ -186,7 +181,6 @@ object StaticDomainParameters requiredHashAlgorithms, requiredCryptoKeyFormats, protocolVersion, - acsCommitmentsCatchUp, ) } } @@ -298,6 +292,14 @@ object OnboardingRestriction { * Must be greater than `maxSequencingTime` specified by a participant, * practically also requires extra slack to allow clock skew between participant and sequencer. * @param onboardingRestriction current onboarding restrictions for participants + * @param catchUpParameters Optional parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]]. + * Defined starting with protobuf version v2 and protocol version v30. + * If None, the catch-up mode is disabled: the participant does not trigger the + * catch-up mode when lagging behind. + * If not None, it specifies the number of reconciliation intervals that the + * participant skips in catch-up mode, and the number of catch-up intervals + * intervals a participant should lag behind in order to enter catch-up mode. + * * @throws DynamicDomainParameters$.InvalidDynamicDomainParameters * if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`. */ @@ -314,6 +316,7 @@ final case class DynamicDomainParameters private ( sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, trafficControlParameters: Option[TrafficControlParameters], onboardingRestriction: OnboardingRestriction, + acsCommitmentsCatchUpConfig: Option[AcsCommitmentsCatchUpConfig], )( override val representativeProtocolVersion: RepresentativeProtocolVersion[ DynamicDomainParameters.type @@ -372,6 +375,8 @@ final case class DynamicDomainParameters private ( sequencerAggregateSubmissionTimeout, trafficControlParameters: Option[TrafficControlParameters] = trafficControlParameters, onboardingRestriction: OnboardingRestriction = onboardingRestriction, + acsCommitmentsCatchUpConfigParameter: Option[AcsCommitmentsCatchUpConfig] = + acsCommitmentsCatchUpConfig, ): DynamicDomainParameters = DynamicDomainParameters.tryCreate( confirmationResponseTimeout = confirmationResponseTimeout, mediatorReactionTimeout = mediatorReactionTimeout, @@ -385,6 +390,7 @@ final case class DynamicDomainParameters private ( sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, trafficControlParameters = trafficControlParameters, onboardingRestriction = onboardingRestriction, + acsCommitmentsCatchUpConfigParameter = acsCommitmentsCatchUpConfigParameter, )(representativeProtocolVersion) def toProtoV30: v30.DynamicDomainParameters = v30.DynamicDomainParameters( @@ -407,6 +413,7 @@ final case class DynamicDomainParameters private ( sequencerAggregateSubmissionTimeout = Some(sequencerAggregateSubmissionTimeout.toProtoPrimitive), trafficControlParameters = trafficControlParameters.map(_.toProtoV30), + acsCommitmentsCatchupConfig = acsCommitmentsCatchUpConfig.map(_.toProtoV30), ) // TODO(#14052) add topology limits @@ -434,6 +441,7 @@ final case class DynamicDomainParameters private ( param("max request size", _.maxRequestSize.value), param("sequencer aggregate submission timeout", _.sequencerAggregateSubmissionTimeout), paramIfDefined("traffic control config", _.trafficControlParameters), + paramIfDefined("ACS commitment catchup config", _.acsCommitmentsCatchUpConfig), ) } else { prettyOfClass( @@ -500,6 +508,9 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma private val 
defaultOnboardingRestriction: OnboardingRestriction = OnboardingRestriction.UnrestrictedOpen + private val defaultAcsCommitmentsCatchUp: Option[AcsCommitmentsCatchUpConfig] = + Option.empty[AcsCommitmentsCatchUpConfig] + /** Safely creates DynamicDomainParameters. * * @return `Left(...)` if `mediatorDeduplicationTimeout` is less than twice of `ledgerTimeRecordTimeTolerance`. @@ -517,6 +528,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, trafficControlConfig: Option[TrafficControlParameters], onboardingRestriction: OnboardingRestriction, + acsCommitmentsCatchUpConfig: Option[AcsCommitmentsCatchUpConfig], )( representativeProtocolVersion: RepresentativeProtocolVersion[DynamicDomainParameters.type] ): Either[InvalidDynamicDomainParameters, DynamicDomainParameters] = @@ -534,6 +546,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout, trafficControlConfig, onboardingRestriction, + acsCommitmentsCatchUpConfig, )(representativeProtocolVersion) ) @@ -554,6 +567,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout: NonNegativeFiniteDuration, trafficControlParameters: Option[TrafficControlParameters], onboardingRestriction: OnboardingRestriction, + acsCommitmentsCatchUpConfigParameter: Option[AcsCommitmentsCatchUpConfig], )( representativeProtocolVersion: RepresentativeProtocolVersion[DynamicDomainParameters.type] ): DynamicDomainParameters = { @@ -570,6 +584,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout, trafficControlParameters, onboardingRestriction, + acsCommitmentsCatchUpConfigParameter, )(representativeProtocolVersion) } @@ -607,6 +622,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout = defaultSequencerAggregateSubmissionTimeout, trafficControlParameters = defaultTrafficControlParameters, onboardingRestriction = defaultOnboardingRestriction, + acsCommitmentsCatchUpConfigParameter = defaultAcsCommitmentsCatchUp, )( protocolVersionRepresentativeFor(protocolVersion) ) @@ -637,6 +653,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout = sequencerAggregateSubmissionTimeout, trafficControlParameters = defaultTrafficControlParameters, onboardingRestriction = defaultOnboardingRestriction, + acsCommitmentsCatchUpConfigParameter = defaultAcsCommitmentsCatchUp, )( protocolVersionRepresentativeFor(protocolVersion) ) @@ -671,6 +688,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma _partyHostingLimits, sequencerAggregateSubmissionTimeoutP, trafficControlConfigP, + acsCommitmentCatchupConfigP, ) = domainParametersP for { @@ -736,6 +754,10 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma onboardingRestriction <- OnboardingRestriction.fromProtoV30(onboardingRestrictionP) rpv <- protocolVersionRepresentativeFor(ProtoVersion(30)) + acsCommitmentCatchupConfig <- acsCommitmentCatchupConfigP.traverse( + AcsCommitmentsCatchUpConfig.fromProtoV30 + ) + domainParameters <- create( confirmationResponseTimeout = confirmationResponseTimeout, @@ -750,6 +772,7 @@ object DynamicDomainParameters extends HasProtocolVersionedCompanion[DynamicDoma sequencerAggregateSubmissionTimeout = 
sequencerAggregateSubmissionTimeout, trafficControlConfig = trafficControlConfig, onboardingRestriction = onboardingRestriction, + acsCommitmentsCatchUpConfig = acsCommitmentCatchupConfig, )(rpv).leftMap(_.toProtoDeserializationError) } yield domainParameters } diff --git a/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala b/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala index d521b7ab6..fe9eb3d0c 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/protocol/HasSerializableContract.scala @@ -3,12 +3,12 @@ package com.digitalasset.canton.protocol -import com.digitalasset.canton.TransferCounterO +import com.digitalasset.canton.TransferCounter trait HasSerializableContract { def contract: SerializableContract - def transferCounter: TransferCounterO + def transferCounter: TransferCounter } diff --git a/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala b/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala index cf8a1a14b..2d0e13ee2 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/protocol/package.scala @@ -32,8 +32,6 @@ package object protocol { type LfTransaction = Transaction val LfTransaction: Transaction.type = Transaction - val LfTransactionErrors: TransactionErrors.type = TransactionErrors - type LfVersionedTransaction = VersionedTransaction val LfVersionedTransaction: VersionedTransaction.type = VersionedTransaction diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala index 6b81da421..c6ee0b9fd 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerAggregator.scala @@ -124,6 +124,7 @@ class SequencerAggregator( ): Either[SequencerAggregatorError, Unit] = combine(messages).map(addEventToQueue) + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) def combineAndMergeEvent( sequencerId: SequencerId, message: OrdinarySerializedEvent, diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala index 3a4e57179..c54f5d8b6 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnection.scala @@ -83,8 +83,9 @@ final case class GrpcSequencerConnection( override def pretty: Pretty[GrpcSequencerConnection] = prettyOfClass( + param("sequencerAlias", _.sequencerAlias), param("endpoints", _.endpoints.map(_.toURI(transportSecurity)).toList), - param("transportSecurity", _.transportSecurity), + paramIfTrue("transportSecurity", _.transportSecurity), paramIfTrue("customTrustCertificates", _.customTrustCertificates.nonEmpty), ) diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala index 982a5316e..850204267 100644 --- 
a/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/SequencerConnections.scala @@ -204,3 +204,37 @@ object SequencerConnections ) ) } + +sealed trait SequencerConnectionValidation { + def toProtoV30: v30.SequencerConnectionValidation +} +object SequencerConnectionValidation { + object Disabled extends SequencerConnectionValidation { + override val toProtoV30: v30.SequencerConnectionValidation = + v30.SequencerConnectionValidation.DISABLED + } + object All extends SequencerConnectionValidation { + override val toProtoV30: v30.SequencerConnectionValidation = + v30.SequencerConnectionValidation.ALL + } + object Active extends SequencerConnectionValidation { + override val toProtoV30: v30.SequencerConnectionValidation = + v30.SequencerConnectionValidation.ACTIVE + } + + def fromProtoV30( + proto: v30.SequencerConnectionValidation + ): ParsingResult[SequencerConnectionValidation] = + proto match { + case v30.SequencerConnectionValidation.DISABLED => Right(Disabled) + case v30.SequencerConnectionValidation.ALL => Right(All) + case v30.SequencerConnectionValidation.ACTIVE => Right(Active) + case _ => + Left( + ProtoDeserializationError.ValueConversionError( + "sequencer_connection_validation", + s"Unknown value: $proto", + ) + ) + } +} diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala index c12e2b76d..6425ff530 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/TrafficControlParameters.scala @@ -8,6 +8,7 @@ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.protocol.v30 as protoV30 import com.digitalasset.canton.sequencing.TrafficControlParameters.{ DefaultBaseTrafficAmount, + DefaultEnforceRateLimiting, DefaultMaxBaseTrafficAccumulationDuration, DefaultReadVsWriteScalingFactor, DefaultSetBalanceRequestSubmissionWindowSize, @@ -33,6 +34,7 @@ final case class TrafficControlParameters( DefaultMaxBaseTrafficAccumulationDuration, setBalanceRequestSubmissionWindowSize: PositiveFiniteDuration = DefaultSetBalanceRequestSubmissionWindowSize, + enforceRateLimiting: Boolean = DefaultEnforceRateLimiting, ) extends PrettyPrinting { lazy val baseRate: NonNegativeLong = NonNegativeLong.tryCreate( @@ -44,6 +46,7 @@ final case class TrafficControlParameters( Some(maxBaseTrafficAccumulationDuration.toProtoPrimitive), readVsWriteScalingFactor.value, Some(setBalanceRequestSubmissionWindowSize.toProtoPrimitive), + enforceRateLimiting, ) override def pretty: Pretty[TrafficControlParameters] = prettyOfClass( @@ -51,6 +54,7 @@ final case class TrafficControlParameters( param("read vs write scaling factor", _.readVsWriteScalingFactor), param("max base traffic accumulation duration", _.maxBaseTrafficAccumulationDuration), param("set balance request submission window size", _.setBalanceRequestSubmissionWindowSize), + param("enforce rate limiting", _.enforceRateLimiting), ) } @@ -63,6 +67,7 @@ object TrafficControlParameters { time.NonNegativeFiniteDuration.apply(time.PositiveSeconds.tryOfMinutes(10L)) val DefaultSetBalanceRequestSubmissionWindowSize: time.PositiveFiniteDuration = time.PositiveFiniteDuration.tryOfMinutes(4L) + val DefaultEnforceRateLimiting: Boolean = true def 
fromProtoV30( proto: protoV30.TrafficControlParameters @@ -87,6 +92,7 @@ object TrafficControlParameters { scalingFactor, maxBaseTrafficAccumulationDuration, setBalanceRequestSubmissionWindowSize, + proto.enforceRateLimiting, ) } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala index bdba878c2..a97d1c4e2 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/authentication/grpc/AuthenticationTokenManager.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.util.Thereafter.syntax.* import io.grpc.Status import java.util.concurrent.atomic.AtomicReference -import scala.concurrent.{ExecutionContext, Future, Promise, blocking} +import scala.concurrent.{ExecutionContext, Future, Promise} import scala.util.{Failure, Success} final case class AuthenticationTokenWithExpiry( @@ -53,21 +53,8 @@ class AuthenticationTokenManager( * If there is no token it will cause a token refresh to start and be completed once obtained. * If there is a refresh already in progress it will be completed with this refresh. */ - def getToken: EitherT[Future, Status, AuthenticationToken] = blocking { - // updates must be synchronized, as we are triggering refreshes from here - // and the AtomicReference.updateAndGet requires the update to be side-effect free - synchronized { - state.get() match { - // we are already refreshing, so pass future result - case Refreshing(pending) => pending.map(_.token) - // we have a token, so share it - case HaveToken(token) => EitherT.rightT[Future, Status](token) - // there is no token yet, so start refreshing and return pending result - case NoToken => - createRefreshTokenFuture() - } - } - } + def getToken: EitherT[Future, Status, AuthenticationToken] = + refreshToken(refreshWhenHaveToken = false) /** Invalid the current token if it matches the provided value. * Although unlikely, the token must be provided here in case a response terminates after a new token has already been generated. 
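The rewrite in the hunk below drops the `synchronized` block around `getToken` in favour of a lock-free compare-and-set on the `AtomicReference` state: every caller prepares a promise, `getAndUpdate` installs the `Refreshing` state only for the caller that wins the race, and only that caller starts the token fetch and completes the shared promise. A minimal self-contained sketch of the pattern (hypothetical simplified types; error handling, expiry, and invalidation omitted):

```scala
// Sketch only: a stripped-down token cache illustrating the getAndUpdate-based refresh.
import java.util.concurrent.atomic.AtomicReference
import scala.concurrent.{ExecutionContext, Future, Promise}

final class TokenCacheSketch(fetchToken: () => Future[String]) {
  private sealed trait State
  private case object NoToken extends State
  private final case class HaveToken(token: String) extends State
  private final case class Refreshing(pending: Future[String]) extends State

  private val state = new AtomicReference[State](NoToken)

  def get(): Future[String] = {
    val promise = Promise[String]()
    state.getAndUpdate {
      case NoToken => Refreshing(promise.future) // try to claim the refresh
      case other => other                        // keep whatever is already there
    } match {
      case Refreshing(pending) => pending               // someone else is already refreshing
      case HaveToken(token) => Future.successful(token) // cached token, no refresh needed
      case NoToken =>                                   // we won the race: start the fetch
        promise.completeWith(fetchToken().map { token =>
          state.set(HaveToken(token)) // publish the result for later callers
          token
        }(ExecutionContext.parasitic))
        promise.future
    }
  }
}
```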
@@ -79,22 +66,45 @@ class AuthenticationTokenManager( } } - private def createRefreshTokenFuture(): EitherT[Future, Status, AuthenticationToken] = { - implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - val syncP = Promise[Unit]() - val refresh = EitherT.right(syncP.future).flatMap(_ => obtainToken(traceContext)) + private def refreshToken( + refreshWhenHaveToken: Boolean + ): EitherT[Future, Status, AuthenticationToken] = { + val refreshTokenPromise = Promise[Either[Status, AuthenticationTokenWithExpiry]]() + val refreshingState = Refreshing(EitherT(refreshTokenPromise.future)) + + state.getAndUpdate { + case NoToken => refreshingState + case have @ HaveToken(_) => if (refreshWhenHaveToken) refreshingState else have + case other => other + } match { + // we are already refreshing, so pass future result + case Refreshing(pending) => pending.map(_.token) + // we have a token, so share it + case HaveToken(token) => + if (refreshWhenHaveToken) createRefreshTokenFuture(refreshTokenPromise) + else EitherT.rightT[Future, Status](token) + // there is no token yet, so start refreshing and return pending result + case NoToken => + createRefreshTokenFuture(refreshTokenPromise) + } + } + private def createRefreshTokenFuture( + promise: Promise[Either[Status, AuthenticationTokenWithExpiry]] + ): EitherT[Future, Status, AuthenticationToken] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext logger.debug("Refreshing authentication token") + val currentRefresh = promise.future def completeRefresh(result: State): Unit = { state.updateAndGet { - case Refreshing(pending) if pending == refresh => result + case Refreshing(pending) if pending.value == currentRefresh => result case other => other }.discard } // asynchronously update the state once completed, one way or another - val refreshTransformed = refresh.value.thereafter { + val currentRefreshTransformed = currentRefresh.thereafter { case Failure(exception) => exception match { case ex: io.grpc.StatusRuntimeException @@ -121,15 +131,12 @@ class AuthenticationTokenManager( completeRefresh(NoToken) case Success(Right(AuthenticationTokenWithExpiry(newToken, expiresAt))) => logger.debug("Token refresh complete") - scheduleRefreshBefore(expiresAt) completeRefresh(HaveToken(newToken)) + scheduleRefreshBefore(expiresAt) } - val res = Refreshing(refresh) - state.set(res) - // only kick off computation once the state is set - syncP.success(()) - EitherT(refreshTransformed).map(_.token) + promise.completeWith(obtainToken(traceContext).value) + EitherT(currentRefreshTransformed).map(_.token) } private def scheduleRefreshBefore(expiresAt: CantonTimestamp): Unit = { @@ -144,7 +151,7 @@ class AuthenticationTokenManager( } private def backgroundRefreshToken(_now: CantonTimestamp): Unit = if (!isClosed) { - createRefreshTokenFuture().discard + refreshToken(refreshWhenHaveToken = true).discard } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala index b60eb20e4..0c048a153 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientFactory.scala @@ -10,12 +10,7 @@ import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import 
com.digitalasset.canton.config.* import com.digitalasset.canton.crypto.{Crypto, SyncCryptoApi, SyncCryptoClient} -import com.digitalasset.canton.lifecycle.* -import com.digitalasset.canton.logging.{ - ErrorLoggingContext, - NamedLoggerFactory, - NamedLoggingContext, -} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLoggingContext} import com.digitalasset.canton.metrics.SequencerClientMetrics import com.digitalasset.canton.networking.Endpoint import com.digitalasset.canton.networking.grpc.ClientChannelBuilder @@ -322,21 +317,5 @@ object SequencerClientFactory { ) } - def validateTransport( - connection: SequencerConnection, - logWarning: Boolean, - )(implicit - executionContext: ExecutionContextExecutor, - errorLoggingContext: ErrorLoggingContext, - closeContext: CloseContext, - ): EitherT[FutureUnlessShutdown, String, Unit] = - SequencerClientTransportFactory.validateTransport( - connection, - traceContextPropagation, - config, - logWarning, - loggerFactory, - ) - } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala index dbd724f2f..6a5bf9dd9 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerClientTransportFactory.scala @@ -7,20 +7,11 @@ import cats.data.EitherT import com.daml.grpc.adapter.ExecutionSequencerFactory import com.daml.nonempty.{NonEmpty, NonEmptyUtil} import com.digitalasset.canton.SequencerAlias -import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} -import com.digitalasset.canton.lifecycle.* -import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory} -import com.digitalasset.canton.networking.grpc.ClientChannelBuilder import com.digitalasset.canton.sequencing.* -import com.digitalasset.canton.sequencing.client.SequencerClientTransportFactory.ValidateTransportResult -import com.digitalasset.canton.sequencing.client.grpc.GrpcSequencerChannelBuilder import com.digitalasset.canton.sequencing.client.transports.* import com.digitalasset.canton.topology.* -import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} -import com.digitalasset.canton.util.Thereafter.syntax.* +import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.* -import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable -import io.grpc.ConnectivityState import org.apache.pekko.stream.Materializer import scala.concurrent.* @@ -46,39 +37,6 @@ trait SequencerClientTransportFactory { ) .map(transports => NonEmptyUtil.fromUnsafe(transports.toMap)) - def validateTransport( - sequencerConnections: SequencerConnections, - logWarning: Boolean, - )(implicit - executionContext: ExecutionContextExecutor, - errorLoggingContext: ErrorLoggingContext, - closeContext: CloseContext, - ): EitherT[FutureUnlessShutdown, String, Unit] = - MonadUtil - .sequentialTraverse(sequencerConnections.connections)(conn => - validateTransport(conn, logWarning) - .transform { - case Right(_) => Right(ValidateTransportResult.Valid) - case Left(error) => Right(ValidateTransportResult.NotValid(error)) - } - ) - .flatMap(checkAgainstTrustThreshold(sequencerConnections.sequencerTrustThreshold, _)) - - private def checkAgainstTrustThreshold( - sequencerTrustThreshold: PositiveInt, - results: 
Seq[ValidateTransportResult], - )(implicit - executionContext: ExecutionContextExecutor - ): EitherT[FutureUnlessShutdown, String, Unit] = EitherT.fromEither[FutureUnlessShutdown] { - if (results.count(_ == ValidateTransportResult.Valid) >= sequencerTrustThreshold.unwrap) - Right(()) - else { - val errors = results - .collect { case ValidateTransportResult.NotValid(message) => message } - Left(errors.mkString(", ")) - } - } - def makeTransport( connection: SequencerConnection, member: Member, @@ -91,92 +49,4 @@ trait SequencerClientTransportFactory { traceContext: TraceContext, ): EitherT[Future, String, SequencerClientTransport & SequencerClientTransportPekko] - def validateTransport( - connection: SequencerConnection, - logWarning: Boolean, - )(implicit - executionContext: ExecutionContextExecutor, - errorLoggingContext: ErrorLoggingContext, - closeContext: CloseContext, - ): EitherT[FutureUnlessShutdown, String, Unit] - -} - -object SequencerClientTransportFactory { - sealed trait ValidateTransportResult extends Product with Serializable - object ValidateTransportResult { - final case object Valid extends ValidateTransportResult - final case class NotValid(message: String) extends ValidateTransportResult - } - - def validateTransport( - connection: SequencerConnection, - traceContextPropagation: TracingConfig.Propagation, - config: SequencerClientConfig, - logWarning: Boolean, - loggerFactory: NamedLoggerFactory, - )(implicit - executionContext: ExecutionContextExecutor, - errorLoggingContext: ErrorLoggingContext, - closeContext: CloseContext, - ): EitherT[FutureUnlessShutdown, String, Unit] = connection match { - case conn: GrpcSequencerConnection => - implicit val traceContext = errorLoggingContext.traceContext - errorLoggingContext.logger.info(s"Validating sequencer connection ${conn}") - val channelBuilder = ClientChannelBuilder(loggerFactory) - val channel = GrpcSequencerChannelBuilder( - channelBuilder, - conn, - NonNegativeInt.maxValue, - traceContextPropagation, - config.keepAliveClient, - ) - def closeChannel(): Unit = { - Lifecycle.close( - Lifecycle.toCloseableChannel( - channel, - errorLoggingContext.logger, - "sequencer-connection-test-channel", - ) - )( - errorLoggingContext.logger - ) - } - // clientConfig.handshakeRetryDelay.underlying.fromNow, - val retryMs = config.initialConnectionRetryDelay.asFiniteApproximation - val attempts = config.handshakeRetryDelay.underlying.toMillis / retryMs.toMillis - def check(): EitherT[Future, String, Unit] = { - channel.getState(true) match { - case ConnectivityState.READY => - errorLoggingContext.logger.info(s"Successfully connected to sequencer at ${conn}") - EitherT.rightT(()) - case other => - val msg = s"Unable to connect to sequencer at ${conn}: channel is ${other}" - errorLoggingContext.debug(msg) - EitherT.leftT(msg) - } - } - val name = "check-valid-sequencer-connection" - EitherT( - retry - .Pause( - errorLoggingContext.logger, - closeContext.context, - maxRetries = attempts.toInt, - delay = retryMs, - operationName = name, - ) - .unlessShutdown( - closeContext.context.performUnlessClosingF(name)(check().value), - NoExnRetryable, - ) - ).thereafter { _ => - closeChannel() - }.leftMap { res => - if (logWarning) { - errorLoggingContext.logger.warn(res) - } - res - } - } } diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala index c99e13531..a395b6860 100644 --- 
a/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/client/SequencerTransportState.scala @@ -217,6 +217,8 @@ class SequencersTransportState( }) }.onShutdown(()) + // TODO(#17726) Figure out whether the synchronization is needed for the whole block and if so refactor into a semaphore! + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) def changeTransport( sequencerTransports: SequencerTransports[?] )(implicit traceContext: TraceContext): Future[Unit] = blocking(lock.synchronized { diff --git a/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala b/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala index 70d981f29..b1a594f69 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/sequencing/protocol/TrafficState.scala @@ -7,6 +7,7 @@ import cats.syntax.apply.* import com.digitalasset.canton.config.RequireTypes import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveLong} import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.store.db.RequiredTypesCodec.nonNegativeLongOptionGetResult import slick.jdbc.{GetResult, SetParameter} @@ -16,7 +17,7 @@ final case class TrafficState( extraTrafficConsumed: NonNegativeLong, baseTrafficRemainder: NonNegativeLong, timestamp: CantonTimestamp, -) { +) extends PrettyPrinting { lazy val extraTrafficLimit: Option[PositiveLong] = PositiveLong.create((extraTrafficRemainder + extraTrafficConsumed).value).toOption @@ -37,6 +38,13 @@ final case class TrafficState( extraTrafficRemainder = extraTrafficRemainder, extraTrafficConsumed = extraTrafficConsumed, ) + + override def pretty: Pretty[TrafficState] = prettyOfClass( + param("timestamp", _.timestamp), + param("extra traffic remainder", _.extraTrafficRemainder), + param("extra traffic consumed", _.extraTrafficConsumed), + param("base traffic remainder", _.baseTrafficRemainder), + ) } object TrafficState { diff --git a/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySequencedEventStore.scala b/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySequencedEventStore.scala index 6b109b9a4..d47da4e60 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySequencedEventStore.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/store/memory/InMemorySequencedEventStore.scala @@ -69,16 +69,15 @@ class InMemorySequencedEventStore(protected val loggerFactory: NamedLoggerFactor ): EitherT[Future, SequencedEventNotFoundError, PossiblyIgnoredSerializedEvent] = { logger.debug(s"Looking to retrieve delivery event $criterion") - blocking(lock.synchronized { - val resO = criterion match { + val resO = blocking(lock.synchronized { + criterion match { case ByTimestamp(timestamp) => eventByTimestamp.get(timestamp) case LatestUpto(inclusive) => eventByTimestamp.rangeTo(inclusive).lastOption.map { case (_, event) => event } } - - EitherT(Future.successful(resO.toRight(SequencedEventNotFoundError(criterion)))) }) + EitherT(Future.successful(resO.toRight(SequencedEventNotFoundError(criterion)))) } override def findRange(criterion: RangeCriterion, limit: Option[Int])(implicit @@ -87,7 +86,7 @@ 
class InMemorySequencedEventStore(protected val loggerFactory: NamedLoggerFactor PossiblyIgnoredSerializedEvent ]] = { logger.debug(s"Looking to retrieve delivery event $criterion") - blocking(lock.synchronized { + val res = blocking(lock.synchronized { criterion match { case ByTimestampRange(lowerInclusive, upperInclusive) => val valuesInRangeIterable = @@ -97,23 +96,22 @@ class InMemorySequencedEventStore(protected val loggerFactory: NamedLoggerFactor pruningStatusF.get match { case Some(pruningStatus) if pruningStatus.timestamp >= lowerInclusive => - EitherT.leftT[Future, Seq[PossiblyIgnoredSerializedEvent]]( - SequencedEventRangeOverlapsWithPruning(criterion, pruningStatus, result) - ) + Left(SequencedEventRangeOverlapsWithPruning(criterion, pruningStatus, result)) case _ => - EitherT.rightT[Future, SequencedEventRangeOverlapsWithPruning](result) + Right(result) } } }) + EitherT.fromEither[Future](res) } override def sequencedEvents( limit: Option[Int] = None )(implicit traceContext: TraceContext): Future[Seq[PossiblyIgnoredSerializedEvent]] = - blocking(lock.synchronized { + Future.successful(blocking(lock.synchronized { // Always copy the elements, as the returned iterator will otherwise explode if the underlying collection is modified. - Future.successful(eventByTimestamp.values.take(limit.getOrElse(Int.MaxValue)).toList) - }) + eventByTimestamp.values.take(limit.getOrElse(Int.MaxValue)).toList + })) override def doPrune( beforeAndIncluding: CantonTimestamp, diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala index 035b509d6..223f99ceb 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/Member.scala @@ -437,6 +437,9 @@ object SequencerId { val threeLetterId = String3.tryCreate("SEQ") } + implicit val sequencerIdOrdering: Ordering[SequencerId] = + Ordering.by(_.toString) + def apply(identifier: Identifier, namespace: Namespace): SequencerId = SequencerId(UniqueIdentifier(identifier, namespace)) diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala index d25fecd61..86383c523 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStateForInititalizationService.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.topology.store import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.topology.processing.SequencedTime import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore import com.digitalasset.canton.topology.{MediatorId, Member, ParticipantId} @@ -69,7 +70,13 @@ final class StoreBasedTopologyStateForInitializationService( effectiveFromO .map { effectiveFrom => logger.debug(s"Fetching initial topology state for $member at $effectiveFrom") - domainTopologyStore.findEssentialStateForMember(member, effectiveFrom.value) + // This is not a mistake: all transactions with `sequenced <= validFrom` need to come from this onboarding snapshot + // because the member 
only receives transactions once its onboarding transaction becomes effective. + val referenceSequencedTime = SequencedTime(effectiveFrom.value) + domainTopologyStore.findEssentialStateAtSequencedTime( + referenceSequencedTime, + excludeMappings = Nil, + ) } // TODO(#12390) should this error out if nothing can be found? .getOrElse(Future.successful(StoredTopologyTransactionsX.empty)) diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala index 4854173e5..291105579 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyStoreX.scala @@ -223,21 +223,33 @@ abstract class TopologyStoreX[+StoreID <: TopologyStoreId](implicit limit: Int, )(implicit traceContext: TraceContext): Future[Set[PartyId]] + /** Finds the topology transaction that first onboarded the sequencer with ID `sequencerId` + */ + def findFirstSequencerStateForSequencer( + sequencerId: SequencerId + )(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, SequencerDomainStateX]]] + + /** Finds the topology transaction that first onboarded the mediator with ID `mediatorId` + */ def findFirstMediatorStateForMediator( mediatorId: MediatorId )(implicit traceContext: TraceContext ): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, MediatorDomainStateX]]] + /** Finds the topology transaction that first onboarded the participant with ID `participantId` + */ def findFirstTrustCertificateForParticipant( participant: ParticipantId )(implicit traceContext: TraceContext ): Future[Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, DomainTrustCertificateX]]] - def findEssentialStateForMember( - member: Member, - asOfInclusive: CantonTimestamp, + def findEssentialStateAtSequencedTime( + asOfInclusive: SequencedTime, + excludeMappings: Seq[TopologyMappingX.Code], )(implicit traceContext: TraceContext): Future[GenericStoredTopologyTransactionsX] protected def signedTxFromStoredTx( diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala index 7325341a0..6aa7d0910 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/store/TopologyTransactionCollectionX.scala @@ -105,6 +105,16 @@ final case class StoredTopologyTransactionsX[+Op <: TopologyChangeOpX, +M <: Top } .getOrElse(this) // this case is triggered by `result` being empty } + + def retainAuthorizedHistoryAndEffectiveProposals: StoredTopologyTransactionsX[Op, M] = { + // only retain transactions that are: + filter(tx => + // * fully authorized + !tx.transaction.isProposal || + // * proposals that are still effective + tx.validUntil.isEmpty + ) + } } object StoredTopologyTransactionsX diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala index f32a883e5..b967a1eb5 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala +++ 
b/community/base/src/main/scala/com/digitalasset/canton/topology/store/db/DbTopologyStoreX.scala @@ -377,6 +377,31 @@ class DbTopologyStoreX[StoreId <: TopologyStoreId]( TopologyChangeOpX.Replace.some, ).map(_.collectOfType[TopologyChangeOpX.Replace]) + override def findFirstSequencerStateForSequencer(sequencerId: SequencerId)(implicit + traceContext: TraceContext + ): Future[Option[StoredTopologyTransactionX[Replace, SequencerDomainStateX]]] = { + logger.debug(s"Querying first sequencer state for $sequencerId") + + queryForTransactions( + // We don't expect too many SequencerDomainStateX mappings in a single domain, so fetching them all from the db + // is acceptable and also because we don't expect to run this query frequently. We can only evaluate the + // `sequencerId` field locally as the sequencer-id is not exposed in a separate column. + sql" AND is_proposal = false" ++ + sql" AND operation = ${TopologyChangeOpX.Replace}" ++ + sql" AND transaction_type = ${SequencerDomainStateX.code}", + operation = "firstSequencerState", + ).map( + _.collectOfMapping[SequencerDomainStateX] + .collectOfType[Replace] + .result + .filter { + _.mapping.allSequencers.contains(sequencerId) + } + .sortBy(_.serial) + .headOption + ) + } + override def findFirstMediatorStateForMediator(mediatorId: MediatorId)(implicit traceContext: TraceContext ): Future[Option[StoredTopologyTransactionX[Replace, MediatorDomainStateX]]] = { @@ -426,14 +451,19 @@ class DbTopologyStoreX[StoreId <: TopologyStoreId]( ) } - override def findEssentialStateForMember(member: Member, asOfInclusive: CantonTimestamp)(implicit + override def findEssentialStateAtSequencedTime( + asOfInclusive: SequencedTime, + excludeMappings: Seq[TopologyMappingX.Code], + )(implicit traceContext: TraceContext ): Future[GenericStoredTopologyTransactionsX] = { - val timeFilter = sql" AND sequenced <= $asOfInclusive" - - logger.debug(s"Querying essential state for member $member as of $asOfInclusive") + val timeFilter = sql" AND sequenced <= ${asOfInclusive.value}" + val mappingFilter = excludeMapping(excludeMappings.toSet) + logger.debug(s"Querying essential state as of $asOfInclusive") - queryForTransactions(timeFilter, "essentialState").map(_.asSnapshotAtMaxEffectiveTime) + queryForTransactions(timeFilter ++ mappingFilter, "essentialState").map( + _.asSnapshotAtMaxEffectiveTime.retainAuthorizedHistoryAndEffectiveProposals + ) } override def bootstrap(snapshot: GenericStoredTopologyTransactionsX)(implicit @@ -682,6 +712,14 @@ class DbTopologyStoreX[StoreId <: TopologyStoreId]( .intercalate(sql", ") ++ sql")" } + private def excludeMapping(types: Set[TopologyMappingX.Code]): SQLActionBuilderChain = { + if (types.isEmpty) sql"" + else + sql" AND transaction_type NOT IN (" ++ types.toSeq + .map(t => sql"$t") + .intercalate(sql", ") ++ sql")" + } + private def findAsOfExclusive( effective: EffectiveTime, subQuery: SQLActionBuilder, diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala index 7c874c1d9..77e854f9f 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/store/memory/InMemoryTopologyStoreX.scala @@ -33,6 +33,7 @@ import com.google.common.annotations.VisibleForTesting import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import
scala.concurrent.{ExecutionContext, Future, blocking} +import scala.math.Ordering.Implicits.* class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( val storeId: StoreId, @@ -96,19 +97,18 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( private def findFilter( asOfExclusive: EffectiveTime, filter: TopologyStoreEntry => Boolean, - ): Future[Seq[GenericSignedTopologyTransactionX]] = { + ): Future[Seq[GenericSignedTopologyTransactionX]] = Future.successful { blocking { synchronized { - val res = topologyTransactionStore + topologyTransactionStore .filter(x => x.from.value < asOfExclusive.value && x.rejected.isEmpty - && (x.until.forall(_.value >= asOfExclusive.value)) + && x.until.forall(_.value >= asOfExclusive.value) && filter(x) ) .map(_.transaction) .toSeq - Future.successful(res) } } } @@ -134,7 +134,7 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( removeMapping: Map[TopologyMappingX.MappingHash, PositiveInt], removeTxs: Set[TopologyTransactionX.TxHash], additions: Seq[GenericValidatedTopologyTransactionX], - )(implicit traceContext: TraceContext): Future[Unit] = + )(implicit traceContext: TraceContext): Future[Unit] = { blocking { synchronized { // transactionally @@ -176,9 +176,10 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( ) } } - Future.unit } } + Future.unit + } @VisibleForTesting override protected[topology] def dumpStoreContent()(implicit @@ -372,6 +373,30 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( ) } + override def findFirstSequencerStateForSequencer( + sequencerId: SequencerId + )(implicit + traceContext: TraceContext + ): Future[ + Option[StoredTopologyTransactionX[TopologyChangeOpX.Replace, SequencerDomainStateX]] + ] = { + filteredState( + blocking(synchronized(topologyTransactionStore.toSeq)), + entry => + !entry.transaction.isProposal && + entry.operation == TopologyChangeOpX.Replace && + entry.mapping + .select[SequencerDomainStateX] + .exists(m => m.allSequencers.contains(sequencerId)), + ).map( + _.collectOfType[TopologyChangeOpX.Replace] + .collectOfMapping[SequencerDomainStateX] + .result + .sortBy(_.serial) + .headOption + ) + } + override def findFirstMediatorStateForMediator( mediatorId: MediatorId )(implicit @@ -419,7 +444,10 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( } - override def findEssentialStateForMember(member: Member, asOfInclusive: CantonTimestamp)(implicit + override def findEssentialStateAtSequencedTime( + asOfInclusive: SequencedTime, + excludeMappings: Seq[TopologyMappingX.Code], + )(implicit traceContext: TraceContext ): Future[GenericStoredTopologyTransactionsX] = { // asOfInclusive is the effective time of the transaction that onboarded the member. @@ -428,10 +456,14 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( blocking(synchronized { topologyTransactionStore.toSeq }), - entry => entry.sequenced.value <= asOfInclusive, + entry => + entry.sequenced <= asOfInclusive && + !excludeMappings.contains(entry.mapping.code), ).map( // 2. 
transform the result such that the validUntil fields are set as they were at maxEffective time of the snapshot _.asSnapshotAtMaxEffectiveTime + // and remove proposals that have been superseded by full authorized transactions + .retainAuthorizedHistoryAndEffectiveProposals ) } @@ -490,15 +522,15 @@ class InMemoryTopologyStoreX[+StoreId <: TopologyStoreId]( )(implicit traceContext: TraceContext ): Future[GenericStoredTopologyTransactionsX] = - blocking(synchronized { + Future.successful(blocking(synchronized { val selected = topologyTransactionStore .filter(x => x.from.value > timestampExclusive && (!x.transaction.isProposal || x.until.isEmpty) && x.rejected.isEmpty ) .map(_.toStoredTransaction) .toSeq - Future.successful(StoredTopologyTransactionsX(limit.fold(selected)(selected.take))) - }) + StoredTopologyTransactionsX(limit.fold(selected)(selected.take)) + })) private def allTransactions( includeRejected: Boolean = false diff --git a/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala b/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala index 0d4cd7cf8..e0acdf6da 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/topology/transaction/TopologyTransactionX.scala @@ -35,7 +35,7 @@ object TopologyChangeOpX { /** Adds or replaces an existing record */ final case object Replace extends TopologyChangeOpX { override def toProto: v30.Enums.TopologyChangeOp = - v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_REPLACE_UNSPECIFIED + v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_ADD_REPLACE } final case object Remove extends TopologyChangeOpX { override def toProto: v30.Enums.TopologyChangeOp = @@ -56,8 +56,10 @@ object TopologyChangeOpX { protoOp: v30.Enums.TopologyChangeOp ): ParsingResult[TopologyChangeOpX] = protoOp match { + case v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_UNSPECIFIED => + Left(FieldNotSet(protoOp.name)) case v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_REMOVE => Right(Remove) - case v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_REPLACE_UNSPECIFIED => Right(Replace) + case v30.Enums.TopologyChangeOp.TOPOLOGY_CHANGE_OP_ADD_REPLACE => Right(Replace) case v30.Enums.TopologyChangeOp.Unrecognized(x) => Left(UnrecognizedEnum(protoOp.name, x)) } diff --git a/community/base/src/main/scala/com/digitalasset/canton/traffic/TrafficBalanceSubmissionHandler.scala b/community/base/src/main/scala/com/digitalasset/canton/traffic/TrafficBalanceSubmissionHandler.scala index 2377d8932..28cc3f8ab 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/traffic/TrafficBalanceSubmissionHandler.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/traffic/TrafficBalanceSubmissionHandler.scala @@ -161,6 +161,11 @@ class TrafficBalanceSubmissionHandler( s"Traffic balance request with max sequencing time $maxSequencingTime successfully submitted" ) Right(()) + case SendResult.Error( + DeliverError(_, _, _, _, SequencerErrors.AggregateSubmissionAlreadySent(message)) + ) => + logger.info(s"The top-up request was already sent: $message") + Right(()) case SendResult.Error(err) => Left(TrafficControlErrors.TrafficBalanceRequestAsyncSendFailed.Error(err.show)) case SendResult.Timeout(time) => diff --git a/community/base/src/main/scala/com/digitalasset/canton/util/IterableUtil.scala b/community/base/src/main/scala/com/digitalasset/canton/util/IterableUtil.scala index 
df6d607fa..0162840f7 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/util/IterableUtil.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/util/IterableUtil.scala @@ -54,6 +54,33 @@ object IterableUtil { generateBlocks(peek()) } + /** Splits the sequence `xs` after each element that satisfies `p` and returns the sequence of chunks */ + def splitAfter[A, CC[X] <: immutable.Iterable[X], C, B]( + xs: IterableOps[A, CC, C & immutable.Iterable[A]] + )(p: A => Boolean): LazyList[NonEmpty[CC[A]]] = { + val iterator = xs.iterator + + def go(): LazyList[NonEmpty[CC[A]]] = { + if (iterator.hasNext) { + val chunkBuilder = xs.iterableFactory.newBuilder[A] + + @tailrec def addUntilP(): Unit = { + if (iterator.hasNext) { + val x = iterator.next() + chunkBuilder.addOne(x) + if (!p(x)) addUntilP() + } + } + + addUntilP() + val block = chunkBuilder.result() + NonEmptyUtil.fromUnsafe(block) #:: go() + } else LazyList.empty + } + + go() + } + /** Returns the zipping of `elems` with `seq` where members `y` of `seq` are skipped if `!by(x, y)` * for the current member `x` from `elems`. Zipping stops when there are no more elements in `elems` or `seq` */ diff --git a/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala b/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala index eae8b539b..32a7b40f5 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/util/LoggerUtil.scala @@ -46,13 +46,15 @@ object LoggerUtil { } } - /** Log the time taken by a task `run` */ - def clue[T](message: => String)(run: => T)(implicit loggingContext: ErrorLoggingContext): T = { + /** Log the time taken by a task `run` and optionally non-fatal throwable */ + def clue[T](message: => String, logNonFatalThrowable: Boolean = false)( + run: => T + )(implicit loggingContext: ErrorLoggingContext): T = { val logger = loggingContext.logger implicit val traceContext: TraceContext = loggingContext.traceContext logger.debug(s"Starting $message") val st = System.nanoTime() - val ret = run + val ret = if (logNonFatalThrowable) logOnThrow(run) else run val end = roundDurationForHumans(Duration(System.nanoTime() - st, TimeUnit.NANOSECONDS)) logger.debug(s"Finished $message after $end") ret diff --git a/community/base/src/main/scala/com/digitalasset/canton/util/SeqUtil.scala b/community/base/src/main/scala/com/digitalasset/canton/util/SeqUtil.scala index 8346cc9cc..f33e9988e 100644 --- a/community/base/src/main/scala/com/digitalasset/canton/util/SeqUtil.scala +++ b/community/base/src/main/scala/com/digitalasset/canton/util/SeqUtil.scala @@ -3,26 +3,11 @@ package com.digitalasset.canton.util -import com.daml.nonempty.{NonEmpty, NonEmptyUtil} - import scala.annotation.tailrec import scala.util.Random object SeqUtil { - /** Splits the sequence `xs` after each element that satisfies `p` and returns the sequence of chunks */ - def splitAfter[A](xs: Seq[A])(p: A => Boolean): Seq[NonEmpty[Seq[A]]] = { - @tailrec def go(acc: Seq[NonEmpty[Seq[A]]], rest: Seq[A]): Seq[NonEmpty[Seq[A]]] = { - val (before, next) = rest.span(!p(_)) - NonEmpty.from(next) match { - case None => NonEmpty.from(before).fold(acc)(_ +: acc) - case Some(nextNE) => - go(NonEmptyUtil.fromUnsafe(before :+ nextNE.head1) +: acc, nextNE.tail1) - } - } - go(Seq.empty, xs).reverse - } - /** Picks a random subset of indices of size `size` from `xs` and returns a random permutation of the elements * at these indices. 
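For orientation, here is a hedged usage sketch of the new IterableUtil.splitAfter added above; the input values are illustrative and not taken from the patch or its tests:

import com.digitalasset.canton.util.IterableUtil

// Split after every element that satisfies the predicate (here: after every even number).
val chunks = IterableUtil.splitAfter(Seq(1, 2, 3, 4, 5))(_ % 2 == 0)
// chunks is a LazyList of NonEmpty chunks, produced lazily:
//   Seq(1, 2), Seq(3, 4), Seq(5)

Every chunk ends with an element satisfying the predicate, except possibly the last one; the surrounding hunks move this utility from SeqUtil to IterableUtil and make it lazy.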
* Implements the Fisher-Yates shuffle (https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle) diff --git a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java index f633fba6e..ed48e1909 100644 --- a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java +++ b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageResponse.java @@ -14,7 +14,7 @@ public final class GetPackageResponse { // Clone of the PackageServiceOuterClass.HashFunction enumeration public enum HashFunction { - SHA256(0), + HASH_FUNCTION_SHA256(0), UNRECOGNIZED(-1), ; diff --git a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java index a640fbeb1..6769d6e57 100644 --- a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java +++ b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/GetPackageStatusResponse.java @@ -12,8 +12,8 @@ public final class GetPackageStatusResponse { // Clone of the PackageServiceOuterClass.PackageStatus enumeration public enum PackageStatus { - UNKNOWN(0), - REGISTERED(1), + PACKAGE_STATUS_UNSPECIFIED(0), + PACKAGE_STATUS_REGISTERED(1), UNRECOGNIZED(-1), ; diff --git a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ValueDecoder.java b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ValueDecoder.java index 87e2f8829..c94912a1b 100644 --- a/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ValueDecoder.java +++ b/community/bindings-java/src/main/java/com/daml/ledger/javaapi/data/codegen/ValueDecoder.java @@ -4,7 +4,6 @@ package com.daml.ledger.javaapi.data.codegen; import com.daml.ledger.javaapi.data.Value; -import java.util.function.Function; /** * A converter from the encoded form of a Daml value, represented by {@link Value}, to the @@ -51,24 +50,4 @@ public interface ValueDecoder { default ContractId fromContractId(String contractId) { throw new IllegalArgumentException("Cannot create contract id for this data type"); } - - // TODO(i15640) delete - /** - * @deprecated since Daml 2.5.0; it is only used in deprecated fromValue method of all generated - * data class - */ - @Deprecated - static ValueDecoder fromFunction(Function fromValue) { - return new ValueDecoder<>() { - @Override - public A decode(Value value) { - return fromValue.apply(value); - } - - @Override - public ContractId fromContractId(String contractId) { - return new ContractId<>(contractId); - } - }; - } } diff --git a/community/common/src/main/daml/CantonExamples/daml.yaml b/community/common/src/main/daml/CantonExamples/daml.yaml index 7df5775ba..94f00d4d6 100644 --- a/community/common/src/main/daml/CantonExamples/daml.yaml +++ b/community/common/src/main/daml/CantonExamples/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: CantonExamples diff --git a/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql b/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql index 3d171cadc..7c7b25a55 100644 --- a/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql +++ 
b/community/common/src/main/resources/db/migration/canton/h2/stable/V1_1__initial.sql @@ -189,7 +189,7 @@ create table common_topology_dispatching ( create type change_type as enum ('deactivation', 'activation'); -- The specific operation type that introduced a contract change. -create type operation_type as enum ('create', 'transfer-in', 'archive', 'transfer-out'); +create type operation_type as enum ('create', 'add', 'transfer-in', 'archive', 'purge', 'transfer-out'); -- Maintains the status of contracts create table par_active_contracts ( @@ -202,8 +202,8 @@ create table par_active_contracts ( ts bigint not null, -- Request counter of the time of change request_counter bigint not null, - -- optional remote domain id in case of transfers - remote_domain_id int, + -- optional remote domain index in case of transfers + remote_domain_idx int, transfer_counter bigint default null, primary key (domain_id, contract_id, ts, request_counter, change) ); @@ -977,11 +977,10 @@ create table ord_pbft_messages( discriminator smallint not null, -- sender of the message - from_host varchar(300) not null, - from_port smallint not null, + from_sequencer_id varchar(300) not null, -- for each block number, we only expect one message of each kind for the same sender. -- in the case of pre-prepare, we only expect one message for the whole block, but for simplicity -- we won't differentiate that at the database level. - primary key (block_number, from_host, from_port, discriminator) + primary key (block_number, from_sequencer_id, discriminator) ); diff --git a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql index daaf04900..b1e2f9f20 100644 --- a/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql +++ b/community/common/src/main/resources/db/migration/canton/postgres/stable/V1_1__initial.sql @@ -194,7 +194,7 @@ create table common_topology_dispatching ( create type change_type as enum ('deactivation', 'activation'); -- The specific operation type that introduced a contract change. -create type operation_type as enum ('create', 'transfer-in', 'archive', 'transfer-out'); +create type operation_type as enum ('create', 'add', 'transfer-in', 'archive', 'purge', 'transfer-out'); -- Maintains the status of contracts create table par_active_contracts ( @@ -207,8 +207,8 @@ create table par_active_contracts ( ts bigint not null, -- Request counter of the time of change request_counter bigint not null, - -- optional remote domain id in case of transfers - remote_domain_id int, + -- optional remote domain index in case of transfers + remote_domain_idx int, transfer_counter bigint default null, primary key (domain_id, contract_id, ts, request_counter, change) ); @@ -1005,11 +1005,10 @@ create table ord_pbft_messages( discriminator smallint not null, -- sender of the message - from_host varchar(300) collate "C" not null, - from_port smallint not null, + from_sequencer_id varchar(300) collate "C" not null, -- for each block number, we only expect one message of each kind for the same sender. -- in the case of pre-prepare, we only expect one message for the whole block, but for simplicity -- we won't differentiate that at the database level. 
- primary key (block_number, from_host, from_port, discriminator) + primary key (block_number, from_sequencer_id, discriminator) ); diff --git a/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala b/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala index fa7477eda..4641a4adb 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoader.scala @@ -17,26 +17,30 @@ import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader.{ SequencerInfoLoaderError, } import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.config.RequireTypes.NonNegativeInt +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} import com.digitalasset.canton.lifecycle.CloseContext -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} import com.digitalasset.canton.protocol.StaticDomainParameters import com.digitalasset.canton.sequencing.protocol.{HandshakeRequest, HandshakeResponse} import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, SequencerConnection, + SequencerConnectionValidation, SequencerConnections, } import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.{TraceContext, TracingConfig} import com.digitalasset.canton.util.FutureInstances.* +import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.Thereafter.syntax.* import com.digitalasset.canton.util.retry.RetryUtil.NoExnRetryable import com.digitalasset.canton.util.{MonadUtil, retry} import com.digitalasset.canton.version.ProtocolVersion +import com.google.common.annotations.VisibleForTesting import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.Materializer +import scala.annotation.tailrec import scala.concurrent.{ExecutionContextExecutor, Future} class SequencerInfoLoader( @@ -63,81 +67,6 @@ class SequencerInfoLoader( ) } - private def extractSingleError( - errors: Seq[LoadSequencerEndpointInformationResult.NotValid] - ): SequencerInfoLoaderError = { - require(errors.nonEmpty, "Non-empty list of errors is expected") - val nonEmptyResult = NonEmptyUtil.fromUnsafe(errors) - if (nonEmptyResult.size == 1) nonEmptyResult.head1.error - else { - val message = nonEmptyResult.map(_.error.cause).mkString(",") - SequencerInfoLoaderError.FailedToConnectToSequencers(message) - } - } - - private def aggregateBootstrapInfo(sequencerConnections: SequencerConnections)( - result: Seq[LoadSequencerEndpointInformationResult] - )(implicit - traceContext: TraceContext - ): Either[SequencerInfoLoaderError, SequencerAggregatedInfo] = { - require(result.nonEmpty, "Non-empty list of sequencerId-to-endpoint pair is expected") - val validSequencerConnections = result.collect { - case valid: LoadSequencerEndpointInformationResult.Valid => - valid - } - if (validSequencerConnections.sizeIs >= sequencerConnections.sequencerTrustThreshold.unwrap) { - result.collect { - case LoadSequencerEndpointInformationResult.NotValid(sequencerConnection, error) => - logger.warn( - s"Unable to connect to sequencer $sequencerConnection because of ${error.cause}" - ) - }.discard - val nonEmptyResult = NonEmptyUtil.fromUnsafe(validSequencerConnections) - val domainIds = 
nonEmptyResult.map(_.domainClientBootstrapInfo.domainId).toSet - val staticDomainParameters = nonEmptyResult.map(_.staticDomainParameters).toSet - val expectedSequencers = NonEmptyUtil.fromUnsafe( - nonEmptyResult - .groupBy(_.sequencerAlias) - .view - .mapValues(_.map(_.domainClientBootstrapInfo.sequencerId).head1) - .toMap - ) - if (domainIds.sizeIs > 1) { - SequencerInfoLoaderError - .SequencersFromDifferentDomainsAreConfigured( - s"Non-unique domain ids received by connecting to sequencers: [${domainIds.mkString(",")}]" - ) - .asLeft - } else if (staticDomainParameters.sizeIs > 1) { - SequencerInfoLoaderError - .MisconfiguredStaticDomainParameters( - s"Non-unique static domain parameters received by connecting to sequencers" - ) - .asLeft - } else - SequencerConnections - .many( - nonEmptyResult.map(_.connection), - sequencerConnections.sequencerTrustThreshold, - sequencerConnections.submissionRequestAmplification, - ) - .leftMap(SequencerInfoLoaderError.FailedToConnectToSequencers) - .map(connections => - SequencerAggregatedInfo( - domainId = domainIds.head1, - staticDomainParameters = staticDomainParameters.head1, - expectedSequencers = expectedSequencers, - sequencerConnections = connections, - ) - ) - } else { - val invalidSequencerConnections = result.collect { - case nonValid: LoadSequencerEndpointInformationResult.NotValid => nonValid - } - extractSingleError(invalidSequencerConnections).asLeft - } - } - private def getBootstrapInfoDomainParameters( domainAlias: DomainAlias, sequencerAlias: SequencerAlias, @@ -199,7 +128,7 @@ class SequencerInfoLoader( ): EitherT[ Future, SequencerInfoLoaderError, - (SequencerAlias, (DomainClientBootstrapInfo, StaticDomainParameters)), + (DomainClientBootstrapInfo, StaticDomainParameters), ] = connection match { case grpc: GrpcSequencerConnection => @@ -218,7 +147,7 @@ class SequencerInfoLoader( client, ) .thereafter(_ => client.close()) - } yield connection.sequencerAlias -> bootstrapInfoDomainParameters + } yield bootstrapInfoDomainParameters } private def performHandshake( @@ -251,23 +180,83 @@ class SequencerInfoLoader( () } - def loadSequencerEndpoints( + def loadAndAggregateSequencerEndpoints( domainAlias: DomainAlias, sequencerConnections: SequencerConnections, + sequencerConnectionValidation: SequencerConnectionValidation, )(implicit traceContext: TraceContext, closeContext: CloseContext, ): EitherT[Future, SequencerInfoLoaderError, SequencerAggregatedInfo] = EitherT( + loadSequencerEndpoints( + domainAlias, + sequencerConnections, + sequencerConnectionValidation == SequencerConnectionValidation.All, + ).map( + SequencerInfoLoader.aggregateBootstrapInfo( + logger, + sequencerTrustThreshold = sequencerConnections.sequencerTrustThreshold, + submissionRequestAmplification = sequencerConnections.submissionRequestAmplification, + sequencerConnectionValidation = sequencerConnectionValidation, + ) + ) + ) + + def validateSequencerConnection( + alias: DomainAlias, + expectedDomainId: Option[DomainId], + sequencerConnections: SequencerConnections, + sequencerConnectionValidation: SequencerConnectionValidation, + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): EitherT[Future, Seq[LoadSequencerEndpointInformationResult.NotValid], Unit] = + sequencerConnectionValidation match { + case SequencerConnectionValidation.Disabled => EitherT.rightT(()) + case SequencerConnectionValidation.All | SequencerConnectionValidation.Active => + EitherT( + loadSequencerEndpoints( + alias, + sequencerConnections, + 
sequencerConnectionValidation == SequencerConnectionValidation.All, + ) + .map( + SequencerInfoLoader.validateNewSequencerConnectionResults( + expectedDomainId, + sequencerConnectionValidation, + logger, + )(_) + ) + ) + } + + private def loadSequencerEndpoints( + domainAlias: DomainAlias, + sequencerConnections: SequencerConnections, + loadAllEndpoints: Boolean, + )(implicit + traceContext: TraceContext, + closeContext: CloseContext, + ): Future[Seq[LoadSequencerEndpointInformationResult]] = { + // if we want to validate all endpoints, we can expand the list of connections on a per-endpoint basis + // during aggregation, we'll boil this down again + val connections = if (loadAllEndpoints) { + sequencerConnections.connections.flatMap { case connection: GrpcSequencerConnection => + connection.endpoints.map(endpoint => + connection.copy(endpoints = NonEmpty.mk(Seq, endpoint)) + ) + } + } else + sequencerConnections.connections MonadUtil .parTraverseWithLimit( parallelism = sequencerInfoLoadParallelism.unwrap - )(sequencerConnections.connections) { connection => + )(connections) { connection => getBootstrapInfoDomainParameters(domainAlias)(connection).value .map { - case Right((sequencerAlias, (domainClientBootstrapInfo, staticDomainParameters))) => + case Right((domainClientBootstrapInfo, staticDomainParameters)) => LoadSequencerEndpointInformationResult.Valid( connection, - sequencerAlias, domainClientBootstrapInfo, staticDomainParameters, ) @@ -275,8 +264,7 @@ class SequencerInfoLoader( LoadSequencerEndpointInformationResult.NotValid(connection, error) } } - .map(aggregateBootstrapInfo(sequencerConnections)) - ) + } } @@ -287,7 +275,6 @@ object SequencerInfoLoader { object LoadSequencerEndpointInformationResult { final case class Valid( connection: SequencerConnection, - sequencerAlias: SequencerAlias, domainClientBootstrapInfo: DomainClientBootstrapInfo, staticDomainParameters: StaticDomainParameters, ) extends LoadSequencerEndpointInformationResult @@ -314,7 +301,7 @@ object SequencerInfoLoader { final case class HandshakeFailedError(cause: String) extends SequencerInfoLoaderError final case class SequencersFromDifferentDomainsAreConfigured(cause: String) extends SequencerInfoLoaderError - + final case class InconsistentConnectivity(cause: String) extends SequencerInfoLoaderError final case class MisconfiguredStaticDomainParameters(cause: String) extends SequencerInfoLoaderError final case class FailedToConnectToSequencers(cause: String) extends SequencerInfoLoaderError @@ -322,7 +309,7 @@ object SequencerInfoLoader { extends SequencerInfoLoaderError } - def fromSequencerConnectClientError(alias: DomainAlias)( + private def fromSequencerConnectClientError(alias: DomainAlias)( error: SequencerConnectClient.Error ): SequencerInfoLoaderError = error match { case SequencerConnectClient.Error.DeserializationFailure(e) => @@ -334,4 +321,194 @@ object SequencerInfoLoader { case SequencerConnectClient.Error.Transport(message) => SequencerInfoLoaderError.DomainIsNotAvailableError(alias, message) } + + /** Small utility function used to validate the sequencer connections whenever the configuration changes */ + def validateNewSequencerConnectionResults( + expectedDomainId: Option[DomainId], + sequencerConnectionValidation: SequencerConnectionValidation, + logger: TracedLogger, + )( + results: Seq[LoadSequencerEndpointInformationResult] + )(implicit + traceContext: TraceContext + ): Either[Seq[LoadSequencerEndpointInformationResult.NotValid], Unit] = { + // now, check what failed and whether the 
reported sequencer ids and domain-ids aligned + @tailrec + def go( + reference: Option[LoadSequencerEndpointInformationResult.Valid], + sequencerIds: Map[SequencerId, SequencerAlias], + rest: List[LoadSequencerEndpointInformationResult], + accumulated: Seq[LoadSequencerEndpointInformationResult.NotValid], + ): Seq[LoadSequencerEndpointInformationResult.NotValid] = rest match { + case Nil => + accumulated + case (notValid: LoadSequencerEndpointInformationResult.NotValid) :: rest => + if (sequencerConnectionValidation != SequencerConnectionValidation.All) { + logger.info( + s"Skipping validation, as I am unable to obtain domain-id and sequencer-id: ${notValid.error} for ${notValid.sequencerConnection}" + ) + go(reference, sequencerIds, rest, accumulated) + } else + go(reference, sequencerIds, rest, notValid +: accumulated) + case (valid: LoadSequencerEndpointInformationResult.Valid) :: rest => + val result = for { + // check that domain-id matches the reference + _ <- Either.cond( + reference.forall(x => + x.domainClientBootstrapInfo.domainId == valid.domainClientBootstrapInfo.domainId + ), + (), + SequencerInfoLoaderError.SequencersFromDifferentDomainsAreConfigured( + show"Domain-id mismatch ${valid.domainClientBootstrapInfo.domainId} vs the first one found ${reference + .map(_.domainClientBootstrapInfo.domainId)}" + ), + ) + // check that static domain parameters match + _ <- Either.cond( + reference.forall(x => x.staticDomainParameters == valid.staticDomainParameters), + (), + SequencerInfoLoaderError.MisconfiguredStaticDomainParameters( + show"Static domain parameters mismatch ${valid.staticDomainParameters.toString} vs the first one found ${reference + .map(_.staticDomainParameters.toString)}" + ), + ) + // check that domain-id matches expected + _ <- Either.cond( + expectedDomainId.forall(_ == valid.domainClientBootstrapInfo.domainId), + (), + SequencerInfoLoaderError.InconsistentConnectivity( + show"Domain-id ${valid.domainClientBootstrapInfo.domainId} does not match expected ${expectedDomainId}" + ), + ) + // check that we don't have the same sequencer-id reported by different aliases + _ <- Either.cond( + sequencerIds + .get(valid.domainClientBootstrapInfo.sequencerId) + .forall(_ == valid.connection.sequencerAlias), + (), + SequencerInfoLoaderError.InconsistentConnectivity( + show"the same sequencer-id reported by different alias ${sequencerIds + .get(valid.domainClientBootstrapInfo.sequencerId)}" + ), + ) + _ <- sequencerIds + .collectFirst { + case (sequencerId, alias) + if alias == valid.connection.sequencerAlias && sequencerId != valid.domainClientBootstrapInfo.sequencerId => + SequencerInfoLoaderError.InconsistentConnectivity( + show"sequencer-id mismatch ${valid.domainClientBootstrapInfo.sequencerId} vs previously observed ${sequencerId}" + ) + } + .toLeft(()) + } yield () + result match { + case Right(_) => + go( + Some(valid), + sequencerIds.updated( + valid.domainClientBootstrapInfo.sequencerId, + valid.connection.sequencerAlias, + ), + rest, + accumulated, + ) + case Left(error) => + go( + reference, + sequencerIds, + rest, + LoadSequencerEndpointInformationResult.NotValid( + valid.connection, + error, + ) +: accumulated, + ) + } + + } + val collected = go(None, Map.empty, results.toList, Seq.empty) + Either.cond(collected.isEmpty, (), collected) + } + + /** Aggregates the endpoint information into the actual connection + * + * Given a set of sequencer connections and attempts to get the sequencer-id and domain-id + * from each of them, we'll recompute the actual 
connections to be used. + * Note that this method here would require a bit more smartness as whether a sequencer + * is considered or not depends on whether it was up when we made the connection. + */ + @VisibleForTesting + private[grpc] def aggregateBootstrapInfo( + logger: TracedLogger, + sequencerTrustThreshold: PositiveInt, + submissionRequestAmplification: PositiveInt, + sequencerConnectionValidation: SequencerConnectionValidation, + )( + fullResult: Seq[LoadSequencerEndpointInformationResult] + )(implicit + traceContext: TraceContext + ): Either[SequencerInfoLoaderError, SequencerAggregatedInfo] = { + + require(fullResult.nonEmpty, "Non-empty list of sequencerId-to-endpoint pair is expected") + + validateNewSequencerConnectionResults(None, sequencerConnectionValidation, logger)( + fullResult.toList + ) match { + case Right(()) => + val validSequencerConnections = fullResult + .collect { case valid: LoadSequencerEndpointInformationResult.Valid => + valid + } + .groupBy(_.connection.sequencerAlias) + .flatMap { case (_, v) => v.headOption } + .toSeq + if (validSequencerConnections.sizeIs >= sequencerTrustThreshold.unwrap) { + val nonEmptyResult = NonEmptyUtil.fromUnsafe(validSequencerConnections) + val expectedSequencers = NonEmptyUtil.fromUnsafe( + nonEmptyResult + .groupBy(_.connection.sequencerAlias) + .view + .mapValues(_.map(_.domainClientBootstrapInfo.sequencerId).head1) + .toMap + ) + SequencerConnections + .many( + nonEmptyResult.map(_.connection), + sequencerTrustThreshold, + submissionRequestAmplification, + ) + .leftMap(SequencerInfoLoaderError.FailedToConnectToSequencers) + .map(connections => + SequencerAggregatedInfo( + domainId = nonEmptyResult.head1.domainClientBootstrapInfo.domainId, + staticDomainParameters = nonEmptyResult.head1.staticDomainParameters, + expectedSequencers = expectedSequencers, + sequencerConnections = connections, + ) + ) + } else { + if (sequencerTrustThreshold.unwrap > 1) + logger.warn( + s"Insufficient valid sequencer connections ${validSequencerConnections.size} to reach threshold ${sequencerTrustThreshold.unwrap}" + ) + val invalidSequencerConnections = fullResult.collect { + case nonValid: LoadSequencerEndpointInformationResult.NotValid => nonValid + } + extractSingleError(invalidSequencerConnections).asLeft + } + case Left(value) => extractSingleError(value).asLeft + } + } + + private def extractSingleError( + errors: Seq[LoadSequencerEndpointInformationResult.NotValid] + ): SequencerInfoLoaderError = { + require(errors.nonEmpty, "Non-empty list of errors is expected") + val nonEmptyResult = NonEmptyUtil.fromUnsafe(errors) + if (nonEmptyResult.size == 1) nonEmptyResult.head1.error + else { + val message = nonEmptyResult.map(_.error.cause).mkString(",") + SequencerInfoLoaderError.FailedToConnectToSequencers(message) + } + } + } diff --git a/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala b/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala index 41fba6a70..065604745 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/jce/JcePrivateCrypto.scala @@ -152,4 +152,5 @@ class JcePrivateCrypto( } + override def close(): Unit = () } diff --git a/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkPrivateCrypto.scala 
b/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkPrivateCrypto.scala index dd9adea63..4d10a765e 100644 --- a/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkPrivateCrypto.scala +++ b/community/common/src/main/scala/com/digitalasset/canton/crypto/provider/tink/TinkPrivateCrypto.scala @@ -143,6 +143,8 @@ class TinkPrivateCrypto private ( ) } yield keypair } + + override def close(): Unit = () } object TinkPrivateCrypto { diff --git a/community/common/src/main/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputations.scala b/community/common/src/main/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputations.scala new file mode 100644 index 000000000..849826508 --- /dev/null +++ b/community/common/src/main/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputations.scala @@ -0,0 +1,81 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.digitalasset.canton.config.RequireTypes.PositiveInt +import com.digitalasset.canton.topology.transaction.MediatorDomainStateX + +object MediatorGroupDeltaComputations { + + def verifyProposalConsistency( + adds: Seq[MediatorId], + removes: Seq[MediatorId], + observerAdds: Seq[MediatorId], + observerRemoves: Seq[MediatorId], + updateThreshold: Option[PositiveInt], + ): Either[String, Unit] = for { + _ <- + Either.cond( + !(adds.isEmpty && removes.isEmpty && observerAdds.isEmpty && observerRemoves.isEmpty && updateThreshold.isEmpty), + (), + "no mediator group changes proposed", + ) + + error = Seq[(Seq[MediatorId], Seq[MediatorId], String)]( + (adds, removes, "added and removed as active"), + (adds, observerAdds, "added as active and observer"), + (observerAdds, observerRemoves, "added and removed as observer"), + (removes, observerRemoves, "removed as active and observer"), + ).flatMap { case (first, second, msg) => + val intersection = first.intersect(second) + Option.when(intersection.nonEmpty)( + s"the same mediators ${intersection.mkString(",")} cannot be $msg in the same proposal" + ) + }.mkString(", ") + _ <- Either.cond(error.isEmpty, (), error) + } yield () + + def verifyProposalAgainstCurrentState( + mdsO: Option[MediatorDomainStateX], + adds: Seq[MediatorId], + removes: Seq[MediatorId], + observerAdds: Seq[MediatorId], + observerRemoves: Seq[MediatorId], + updateThreshold: Option[PositiveInt], + ): Either[String, Unit] = { + val (currentActive, currentObservers, currentThreshold) = + mdsO.fold[(Seq[MediatorId], Seq[MediatorId], PositiveInt)]((Nil, Nil, PositiveInt.one))(mds => + (mds.active, mds.observers, mds.threshold) + ) + + val error = Seq[(Seq[MediatorId], Seq[MediatorId], Boolean, String)]( + (currentActive, adds, false, "to be added already active"), + (currentObservers, observerAdds, false, "to be added as observer already observer"), + (currentActive, removes, true, "to be removed not active"), + (currentObservers, observerRemoves, true, "to be removed as observer not observer"), + ).flatMap { case (current, proposed, shouldBePresent, msg) => + val badMediatorIds = + (if (shouldBePresent) proposed.diff(current) + else current.intersect(proposed)) + + Option.when(badMediatorIds.nonEmpty)(s"mediators ${badMediatorIds.mkString(",")} $msg") + }.mkString(", ") + + for { + _ <- Either.cond(error.isEmpty, (), error) + activeMediatorsInProposal = currentActive.diff(removes) ++ adds + _ <- Either.cond( + 
activeMediatorsInProposal.nonEmpty, + (), + "mediator group without active mediators", + ) + threshold = updateThreshold.getOrElse(currentThreshold) + _ <- Either.cond( + activeMediatorsInProposal.size >= threshold.value, + (), + s"mediator group threshold ${threshold} larger than active mediator size ${activeMediatorsInProposal.size}", + ) + } yield () + } +} diff --git a/community/common/src/test/scala/com/digitalasset/canton/Generators.scala b/community/common/src/test/scala/com/digitalasset/canton/Generators.scala index 6611e3920..72d07cb16 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/Generators.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/Generators.scala @@ -31,8 +31,8 @@ object Generators { Gen.stringOfN(32, Gen.alphaNumChar).map(WorkflowId.assertFromString) ) - def transferCounterOGen: Gen[TransferCounterO] = - Gen.choose(0, Long.MaxValue).map(i => Some(TransferCounter(i))) + def transferCounterGen: Gen[TransferCounter] = + Gen.choose(0, Long.MaxValue).map(i => TransferCounter(i)) def lengthLimitedStringGen[A <: AbstractLengthLimitedString]( companion: LengthLimitedStringCompanion[A] diff --git a/community/common/src/test/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoaderTest.scala b/community/common/src/test/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoaderTest.scala new file mode 100644 index 000000000..458dbd5c9 --- /dev/null +++ b/community/common/src/test/scala/com/digitalasset/canton/common/domain/grpc/SequencerInfoLoaderTest.scala @@ -0,0 +1,191 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.common.domain.grpc + +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.common.domain.SequencerConnectClient.DomainClientBootstrapInfo +import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader.{ + LoadSequencerEndpointInformationResult, + SequencerInfoLoaderError, +} +import com.digitalasset.canton.config.RequireTypes.{Port, PositiveInt} +import com.digitalasset.canton.networking.Endpoint +import com.digitalasset.canton.sequencing.{GrpcSequencerConnection, SequencerConnectionValidation} +import com.digitalasset.canton.topology.{DomainId, SequencerId, UniqueIdentifier} +import com.digitalasset.canton.{BaseTest, BaseTestWordSpec, SequencerAlias} +import org.scalatest.Assertion + +class SequencerInfoLoaderTest extends BaseTestWordSpec { + + private lazy val sequencer1 = SequencerId( + UniqueIdentifier.tryFromProtoPrimitive("sequencer1::namespace") + ) + private lazy val sequencer2 = SequencerId( + UniqueIdentifier.tryFromProtoPrimitive("sequencer2::namespace") + ) + private lazy val sequencerAlias1 = SequencerAlias.tryCreate("sequencer1") + private lazy val sequencerAlias2 = SequencerAlias.tryCreate("sequencer2") + private lazy val domainId1 = DomainId.tryFromString("first::namespace") + private lazy val domainId2 = DomainId.tryFromString("second::namespace") + private lazy val endpoint1 = Endpoint("localhost", Port.tryCreate(1001)) + private lazy val endpoint2 = Endpoint("localhost", Port.tryCreate(1002)) + private lazy val staticDomainParameters = BaseTest.defaultStaticDomainParametersWith() + + private def mapArgs( + args: List[ + (SequencerAlias, Endpoint, Either[SequencerInfoLoaderError, DomainClientBootstrapInfo]) + ] + ): List[LoadSequencerEndpointInformationResult] = + args + .map { case (alias, endpoint, result) => + ( + 
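The two verification steps above are designed to be chained: first the internal consistency of the proposal, then its applicability to the current MediatorDomainStateX. A small sketch of such a caller, based on the signatures added in this patch (only the wrapper function itself is invented):

// Hypothetical caller chaining the two checks defined above.
import com.digitalasset.canton.config.RequireTypes.PositiveInt
import com.digitalasset.canton.topology.{MediatorGroupDeltaComputations, MediatorId}
import com.digitalasset.canton.topology.transaction.MediatorDomainStateX

def checkMediatorGroupDelta(
    current: Option[MediatorDomainStateX],
    adds: Seq[MediatorId],
    removes: Seq[MediatorId],
    observerAdds: Seq[MediatorId],
    observerRemoves: Seq[MediatorId],
    updateThreshold: Option[PositiveInt],
): Either[String, Unit] =
  for {
    // reject proposals that are inconsistent in themselves (e.g. add and remove the same mediator)
    _ <- MediatorGroupDeltaComputations.verifyProposalConsistency(
      adds,
      removes,
      observerAdds,
      observerRemoves,
      updateThreshold,
    )
    // then reject proposals that do not apply cleanly to the current mediator group state
    _ <- MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState(
      current,
      adds,
      removes,
      observerAdds,
      observerRemoves,
      updateThreshold,
    )
  } yield ()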
GrpcSequencerConnection( + NonEmpty.mk(Seq, endpoint), + transportSecurity = false, + None, + alias, + ), + result, + ) + } + .map { + case (conn, Right(result)) => + LoadSequencerEndpointInformationResult.Valid( + conn, + result, + staticDomainParameters, + ) + case (conn, Left(result)) => + LoadSequencerEndpointInformationResult.NotValid(conn, result) + } + + private def run( + expectDomainId: Option[DomainId], + args: List[ + (SequencerAlias, Endpoint, Either[SequencerInfoLoaderError, DomainClientBootstrapInfo]) + ], + activeOnly: Boolean = false, + ) = SequencerInfoLoader + .validateNewSequencerConnectionResults( + expectDomainId, + if (activeOnly) SequencerConnectionValidation.Active else SequencerConnectionValidation.All, + logger, + )(mapArgs(args)) + + private def hasError( + expectDomainId: Option[DomainId], + args: List[ + (SequencerAlias, Endpoint, Either[SequencerInfoLoaderError, DomainClientBootstrapInfo]) + ], + activeOnly: Boolean = false, + )(check: String => Assertion): Assertion = { + val result = run(expectDomainId, args, activeOnly) + result.left.value should have length (1) + result.left.value.foreach(x => check(x.error.cause)) + succeed + } + + "endpoint result validation" should { + "left is returned as left" in { + hasError( + None, + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + ( + sequencerAlias2, + endpoint2, + Left(SequencerInfoLoaderError.InvalidState("booh")), + ), + ), + )(_ should include("booh")) + } + "detect mismatches in domain-id" in { + hasError( + None, + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + (sequencerAlias2, endpoint2, Right(DomainClientBootstrapInfo(domainId2, sequencer2))), + ), + )(_ should include("Domain-id mismatch")) + } + "detect if domain-id does not match expected one" in { + hasError( + Some(domainId2), + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))) + ), + )(_ should include("does not match expected")) + } + "detect mismatches in sequencer-id between an alias" in { + hasError( + None, + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + (sequencerAlias1, endpoint2, Right(DomainClientBootstrapInfo(domainId1, sequencer2))), + ), + )(_ should include("sequencer-id mismatch")) + } + "detect the same sequencer-id among different sequencer aliases" in { + hasError( + None, + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + (sequencerAlias2, endpoint2, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + ), + )(_ should include("same sequencer-id reported by different alias")) + } + "accept if everything is fine" in { + run( + None, + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + (sequencerAlias2, endpoint2, Right(DomainClientBootstrapInfo(domainId1, sequencer2))), + ), + activeOnly = false, + ).value shouldBe (()) + } + } + + "aggregation" should { + + def aggregate( + args: List[ + (SequencerAlias, Endpoint, Either[SequencerInfoLoaderError, DomainClientBootstrapInfo]) + ] + ) = { + SequencerInfoLoader.aggregateBootstrapInfo( + logger, + sequencerTrustThreshold = PositiveInt.tryCreate(2), + submissionRequestAmplification = PositiveInt.one, + SequencerConnectionValidation.All, + )(mapArgs(args)) + } + + "accept if everything is fine" in { + aggregate( + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, 
sequencer1))), + (sequencerAlias2, endpoint2, Right(DomainClientBootstrapInfo(domainId1, sequencer2))), + ) + ) match { + case Right(_) => succeed + case Left(value) => fail(value.toString) + } + } + + "reject if we don't have enough sequencers" in { + aggregate( + List( + (sequencerAlias1, endpoint1, Right(DomainClientBootstrapInfo(domainId1, sequencer1))), + (sequencerAlias2, endpoint2, Left(SequencerInfoLoaderError.InvalidState("booh"))), + ) + ) match { + case Right(_) => fail("should not succeed") + case Left(_) => succeed + } + } + + } + +} diff --git a/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTransferData.scala b/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTransferData.scala index aeeed0fda..af2a2b7d8 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTransferData.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/data/GeneratorsTransferData.scala @@ -177,7 +177,7 @@ final class GeneratorsTransferData( contract <- serializableContractArb(canHaveEmptyKey = true).arbitrary creatingTransactionId <- Arbitrary.arbitrary[TransactionId] transferOutResultEvent <- deliveryTransferOutResultGen(contract, sourceProtocolVersion) - transferCounter <- transferCounterOGen + transferCounter <- transferCounterGen hashOps = TestHash // Not used for serialization @@ -203,7 +203,7 @@ final class GeneratorsTransferData( targetDomain <- Arbitrary.arbitrary[TargetDomainId] timeProof <- Arbitrary.arbitrary[TimeProof] - transferCounter <- transferCounterOGen + transferCounter <- transferCounterGen hashOps = TestHash // Not used for serialization diff --git a/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala b/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala index 0da290787..4991995e7 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/protocol/GeneratorsProtocol.scala @@ -37,7 +37,6 @@ final class GeneratorsProtocol( requiredSymmetricKeySchemes <- nonEmptySetGen[SymmetricKeyScheme] requiredHashAlgorithms <- nonEmptySetGen[HashAlgorithm] requiredCryptoKeyFormats <- nonEmptySetGen[CryptoKeyFormat] - acsCommitmentsCatchUp <- Gen.option(Arbitrary.arbitrary[AcsCommitmentsCatchUpConfig]) parameters = StaticDomainParameters.create( requiredSigningKeySchemes, @@ -46,7 +45,6 @@ final class GeneratorsProtocol( requiredHashAlgorithms, requiredCryptoKeyFormats, protocolVersion, - acsCommitmentsCatchUp, ) } yield parameters) @@ -79,6 +77,8 @@ final class GeneratorsProtocol( sequencerAggregateSubmissionTimeout <- Arbitrary.arbitrary[NonNegativeFiniteDuration] onboardingRestriction <- Arbitrary.arbitrary[OnboardingRestriction] + acsCommitmentsCatchupConfig <- Gen.option(Arbitrary.arbitrary[AcsCommitmentsCatchUpConfig]) + dynamicDomainParameters = DynamicDomainParameters.tryCreate( confirmationResponseTimeout, mediatorReactionTimeout, @@ -92,6 +92,7 @@ final class GeneratorsProtocol( sequencerAggregateSubmissionTimeout, trafficControlConfig, onboardingRestriction, + acsCommitmentsCatchupConfig, )(representativePV) } yield dynamicDomainParameters diff --git a/community/common/src/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala new file mode 100644 index 000000000..6845970da --- /dev/null 
+++ b/community/common/src/test/scala/com/digitalasset/canton/topology/MediatorGroupDeltaComputationsTest.scala @@ -0,0 +1,236 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.topology + +import com.digitalasset.canton.BaseTest +import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt} +import com.digitalasset.canton.crypto.Fingerprint +import com.digitalasset.canton.topology.transaction.MediatorDomainStateX +import org.scalatest.wordspec.AnyWordSpec + +import scala.annotation.nowarn + +@nowarn("msg=match may not be exhaustive") +class MediatorGroupDeltaComputationsTest extends AnyWordSpec with BaseTest { + private def mediatorIdFor(idx: Int) = { + val namespace = Namespace(Fingerprint.tryCreate(s"m${idx}")) + MediatorId(Identifier.tryCreate(s"mediator$idx"), namespace) + } + + private lazy val Seq(m1, m2, m3, m4) = (1 to 4).map(mediatorIdFor) + + def range(from: Int, to: Int): Seq[MediatorId] = from to to map mediatorIdFor + + def mds(active: Seq[MediatorId], observers: Seq[MediatorId]): Option[MediatorDomainStateX] = + Some( + MediatorDomainStateX + .create( + DefaultTestIdentities.domainId, + NonNegativeInt.zero, + PositiveInt.one, + active, + observers, + ) + .value + ) + + "MediatorGroupDeltaComputations.verifyProposalConsistency" should { + "succeed on non-overlapping mediatorIds" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = range(1, 2), + removes = range(3, 4), + observerAdds = range(5, 6), + observerRemoves = range(7, 8), + updateThreshold = None, + ) + .value shouldBe () + } + + "succeed when making active mediators observers and vice versa" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = range(1, 2), + removes = range(3, 4), + observerAdds = range(3, 4), + observerRemoves = range(1, 2), + updateThreshold = None, + ) + .value shouldBe () + } + + "complain about empty changes" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = Nil, + removes = Nil, + observerAdds = Nil, + observerRemoves = Nil, + updateThreshold = None, + ) + .leftOrFail("bad proposal") shouldBe "no mediator group changes proposed" + } + + "complain about overlapping adds and removes" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = range(1, 2), + removes = range(2, 3), + observerAdds = Nil, + observerRemoves = Nil, + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "the same mediators MED::mediator2::m2 cannot be added and removed as active in the same proposal" + } + + "complain about overlapping adds and observer adds" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = range(1, 2), + removes = Nil, + observerAdds = range(2, 3), + observerRemoves = Nil, + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "the same mediators MED::mediator2::m2 cannot be added as active and observer in the same proposal" + } + + "complain about overlapping observer adds and observer removes" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = Nil, + removes = Nil, + observerAdds = range(1, 2), + observerRemoves = range(2, 3), + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "the same mediators MED::mediator2::m2 cannot be added and removed as observer in the same proposal" + } + + "complain about overlapping removes and observer removes" in { + 
MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = Nil, + removes = range(1, 2), + observerAdds = Nil, + observerRemoves = range(2, 3), + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "the same mediators MED::mediator2::m2 cannot be removed as active and observer in the same proposal" + } + + "complain about multiple overlapping changes" in { + MediatorGroupDeltaComputations + .verifyProposalConsistency( + adds = range(1, 2) :+ m4, + removes = range(2, 4), + observerAdds = range(2, 3), + observerRemoves = range(1, 2), + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe + "the same mediators MED::mediator2::m2,MED::mediator4::m4 cannot be added and removed as active in the same proposal, " + + "the same mediators MED::mediator2::m2 cannot be added as active and observer in the same proposal, " + + "the same mediators MED::mediator2::m2 cannot be added and removed as observer in the same proposal, " + + "the same mediators MED::mediator2::m2 cannot be removed as active and observer in the same proposal" + } + } + + "MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState" should { + + "succeed with a brand-new MDS with an active and an observer mediator" in { + MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState( + None, + adds = Seq(m1), + removes = Nil, + observerAdds = Seq(m2), + observerRemoves = Nil, + updateThreshold = None, + ) + } + + "succeed adding new active and observer mediators to existing MDS" in { + MediatorGroupDeltaComputations.verifyProposalAgainstCurrentState( + mds(Seq(m1), Seq(m2)), + adds = Seq(m3), + removes = Nil, + observerAdds = Seq(m4), + observerRemoves = Nil, + updateThreshold = None, + ) + } + + "complain when adding existing active and observer mediators" in { + MediatorGroupDeltaComputations + .verifyProposalAgainstCurrentState( + mds(Seq(m1), Seq(m2, m3)), + adds = Seq(m1), + removes = Nil, + observerAdds = Seq(m2, m3), + observerRemoves = Nil, + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "mediators MED::mediator1::m1 to be added already active, " + + "mediators MED::mediator2::m2,MED::mediator3::m3 to be added as observer already observer" + } + + "complain when removing non-existing active and observer mediators" in { + MediatorGroupDeltaComputations + .verifyProposalAgainstCurrentState( + mds(Seq(m1), Seq(m2, m3)), + adds = Nil, + removes = Seq(m2, m3), + observerAdds = Nil, + observerRemoves = Seq(m1), + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "mediators MED::mediator2::m2,MED::mediator3::m3 to be removed not active, " + + "mediators MED::mediator1::m1 to be removed as observer not observer" + } + + "complain when removing last active mediator" in { + MediatorGroupDeltaComputations + .verifyProposalAgainstCurrentState( + mds(Seq(m1), Seq.empty), + adds = Nil, + removes = Seq(m1), + observerAdds = Nil, + observerRemoves = Nil, + updateThreshold = None, + ) + .leftOrFail( + "bad proposal" + ) shouldBe "mediator group without active mediators" + } + + "complain when setting threshold too high" in { + MediatorGroupDeltaComputations + .verifyProposalAgainstCurrentState( + mds(Seq(m1), Seq.empty), + adds = Nil, + removes = Nil, + observerAdds = Nil, + observerRemoves = Nil, + updateThreshold = Some(PositiveInt.two), + ) + .leftOrFail( + "bad proposal" + ) shouldBe "mediator group threshold 2 larger than active mediator size 1" + } + } +} diff --git 
a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTest.scala b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTest.scala index e1bf57dc1..5c5f2c44e 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/topology/store/TopologyStoreXTest.scala @@ -284,7 +284,6 @@ trait TopologyStoreXTest extends AsyncWordSpec with TopologyStoreXTestBase { TimeQuery.HeadState, types = Seq(PartyToParticipantX.code), ) - } yield { expectTransactions( proposalTransactions, @@ -307,6 +306,43 @@ trait TopologyStoreXTest extends AsyncWordSpec with TopologyStoreXTestBase { ) } } + "able to findEssentialStateAtSequencedTime" in { + val store = mk() + for { + _ <- update(store, ts2, add = Seq(tx2_OTK)) + _ <- update(store, ts5, add = Seq(tx5_DTC)) + _ <- update(store, ts6, add = Seq(tx6_MDS)) + + proposalTransactions <- store.findEssentialStateAtSequencedTime( + asOfInclusive = SequencedTime(ts6), + excludeMappings = Nil, + ) + + proposalTransactionsFiltered <- store.findEssentialStateAtSequencedTime( + asOfInclusive = SequencedTime(ts6), + excludeMappings = TopologyMappingX.Code.all.diff( + Seq(DomainTrustCertificateX.code, OwnerToKeyMappingX.code) + ), + ) + + } yield { + expectTransactions( + proposalTransactions, + Seq( + tx2_OTK, + tx5_DTC, + tx6_MDS, + ), + ) + expectTransactions( + proposalTransactionsFiltered, + Seq( + tx2_OTK, + tx5_DTC, + ), + ) + } + } "able to inspect" in { val store = mk() @@ -423,9 +459,9 @@ trait TopologyStoreXTest extends AsyncWordSpec with TopologyStoreXTestBase { ), ) - essentialStateTransactions <- store.findEssentialStateForMember( - tx2_OTK.mapping.member, - asOfInclusive = ts5, + essentialStateTransactions <- store.findEssentialStateAtSequencedTime( + asOfInclusive = SequencedTime(ts5), + excludeMappings = Nil, ) upcomingTransactions <- store.findUpcomingEffectiveChanges(asOfInclusive = ts4) @@ -459,11 +495,9 @@ trait TopologyStoreXTest extends AsyncWordSpec with TopologyStoreXTestBase { expectTransactions( essentialStateTransactions, Seq( - tx1_NSD_Proposal, tx2_OTK, tx3_IDD_Removal, tx3_NSD, - tx3_PTP_Proposal, tx4_DND, tx4_OTK_Proposal, tx5_PTP, diff --git a/community/common/src/test/scala/com/digitalasset/canton/util/IterableUtilTest.scala b/community/common/src/test/scala/com/digitalasset/canton/util/IterableUtilTest.scala index 4474a1128..d1177dab2 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/util/IterableUtilTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/util/IterableUtilTest.scala @@ -8,6 +8,8 @@ import com.digitalasset.canton.BaseTest import com.digitalasset.canton.util.IterableUtilTest.CompareOnlyFirst import org.scalatest.wordspec.AnyWordSpec +import scala.annotation.tailrec + class IterableUtilTest extends AnyWordSpec with BaseTest { "spansBy" should { "work on a simple example" in { @@ -85,6 +87,52 @@ class IterableUtilTest extends AnyWordSpec with BaseTest { .reverse } } + + "splitAfter" should { + "split after the elements" in { + IterableUtil.splitAfter(1 to 12)(isPrime) shouldBe + Seq( + NonEmpty(Seq, 1, 2), + NonEmpty(Seq, 3), + NonEmpty(Seq, 4, 5), + NonEmpty(Seq, 6, 7), + NonEmpty(Seq, 8, 9, 10, 11), + NonEmpty(Seq, 12), + ) + } + + "handle the empty sequence gracefulle" in { + IterableUtil.splitAfter(Seq.empty[Int])(_ => true) shouldBe Seq.empty + IterableUtil.splitAfter(Seq.empty[Int])(_ => false) shouldBe Seq.empty + } 
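The splitAfter tests above and below pin down the intended behaviour: the input is cut after every element satisfying the predicate, each group is non-empty, and a trailing run of non-matching elements forms the last group. A standalone sketch of that behaviour on plain Seq (Canton's implementation returns NonEmpty groups; this simplified version does not):

// Illustrative reimplementation of the splitAfter behaviour exercised by these tests,
// using plain Seq[Seq[A]] instead of Canton's NonEmpty collections.
def splitAfterSimple[A](xs: Seq[A])(p: A => Boolean): Seq[Seq[A]] = {
  val (closed, pending) = xs.foldLeft((Vector.empty[Vector[A]], Vector.empty[A])) {
    case ((done, current), x) =>
      if (p(x)) (done :+ (current :+ x), Vector.empty[A]) // close the group after a matching element
      else (done, current :+ x)
  }
  if (pending.isEmpty) closed else closed :+ pending
}

// e.g. splitAfterSimple(1 to 12)(isPrime) ==
//   Seq(Seq(1, 2), Seq(3), Seq(4, 5), Seq(6, 7), Seq(8, 9, 10, 11), Seq(12))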
+ + "work if no elements satify the predicate" in { + IterableUtil.splitAfter(1 to 10)(_ >= 11) shouldBe Seq(NonEmpty(Seq, 1, 2 to 10: _*)) + } + + "evaluate the predicate only on arguments" in { + IterableUtil.splitAfter(1 to 10)(x => + if (x >= 1 && x <= 10) x == 5 + else throw new IllegalArgumentException(s"Predicate evaluated on $x") + ) shouldBe Seq(NonEmpty(Seq, 1, 2, 3, 4, 5), NonEmpty(Seq, 6, 7, 8, 9, 10)) + } + + // This should run in a couple of hundreds of milliseconds + "work for long lists efficiently" in { + val count = 100000 + IterableUtil.splitAfter((1 to count).toVector)(_ => true) shouldBe (1 to count).map( + NonEmpty(Seq, _) + ) + } + } + + @tailrec + private def isPrime(i: Int): Boolean = { + if (i == Integer.MIN_VALUE) false + else if (i < 0) isPrime(-i) + else if (i < 2) false + else (2 to Math.sqrt(i.toDouble).toInt).forall(d => i % d != 0) + } } object IterableUtilTest { diff --git a/community/common/src/test/scala/com/digitalasset/canton/util/SeqUtilTest.scala b/community/common/src/test/scala/com/digitalasset/canton/util/SeqUtilTest.scala index 72040a702..130a4be43 100644 --- a/community/common/src/test/scala/com/digitalasset/canton/util/SeqUtilTest.scala +++ b/community/common/src/test/scala/com/digitalasset/canton/util/SeqUtilTest.scala @@ -3,53 +3,13 @@ package com.digitalasset.canton.util -import com.daml.nonempty.NonEmpty import com.digitalasset.canton.BaseTest import org.scalatest.wordspec.AnyWordSpec -import scala.annotation.tailrec import scala.util.Random class SeqUtilTest extends AnyWordSpec with BaseTest { - @tailrec - final def isPrime(i: Int): Boolean = { - if (i == Integer.MIN_VALUE) false - else if (i < 0) isPrime(-i) - else if (i < 2) false - else (2 to Math.sqrt(i.toDouble).toInt).forall(d => i % d != 0) - } - - "splitAfter" should { - "split after the elements" in { - SeqUtil.splitAfter(1 to 12)(isPrime) shouldBe - Seq( - NonEmpty(Seq, 1, 2), - NonEmpty(Seq, 3), - NonEmpty(Seq, 4, 5), - NonEmpty(Seq, 6, 7), - NonEmpty(Seq, 8, 9, 10, 11), - NonEmpty(Seq, 12), - ) - } - - "handle the empty sequence gracefulle" in { - SeqUtil.splitAfter(Seq.empty[Int])(_ => true) shouldBe Seq.empty - SeqUtil.splitAfter(Seq.empty[Int])(_ => false) shouldBe Seq.empty - } - - "work if no elements satify the predicate" in { - SeqUtil.splitAfter(1 to 10)(_ >= 11) shouldBe Seq(NonEmpty(Seq, 1, 2 to 10: _*)) - } - - "evaluate the predicate only on arguments" in { - SeqUtil.splitAfter(1 to 10)(x => - if (x >= 1 && x <= 10) x == 5 - else throw new IllegalArgumentException(s"Predicate evaluated on $x") - ) shouldBe Seq(NonEmpty(Seq, 1, 2, 3, 4, 5), NonEmpty(Seq, 6, 7, 8, 9, 10)) - } - } - "randomSubsetShuffle" should { "pick a random subset of the given size" in { val iterations = 1000 diff --git a/community/demo/src/main/daml/ai-analysis/daml.yaml b/community/demo/src/main/daml/ai-analysis/daml.yaml index 3fa0af67e..441f4aae2 100644 --- a/community/demo/src/main/daml/ai-analysis/daml.yaml +++ b/community/demo/src/main/daml/ai-analysis/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: ai-analysis diff --git a/community/demo/src/main/daml/bank/daml.yaml b/community/demo/src/main/daml/bank/daml.yaml index ddeace9ab..49a4d9361 100644 --- a/community/demo/src/main/daml/bank/daml.yaml +++ b/community/demo/src/main/daml/bank/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 
3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: bank diff --git a/community/demo/src/main/daml/doctor/daml.yaml b/community/demo/src/main/daml/doctor/daml.yaml index ac1255063..7452c417f 100644 --- a/community/demo/src/main/daml/doctor/daml.yaml +++ b/community/demo/src/main/daml/doctor/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: doctor diff --git a/community/demo/src/main/daml/health-insurance/daml.yaml b/community/demo/src/main/daml/health-insurance/daml.yaml index 0af4cc08a..763523f2f 100644 --- a/community/demo/src/main/daml/health-insurance/daml.yaml +++ b/community/demo/src/main/daml/health-insurance/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: health-insurance diff --git a/community/demo/src/main/daml/medical-records/daml.yaml b/community/demo/src/main/daml/medical-records/daml.yaml index 45e08487a..0a2ceed55 100644 --- a/community/demo/src/main/daml/medical-records/daml.yaml +++ b/community/demo/src/main/daml/medical-records/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: medical-records diff --git a/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala b/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala index 29b4aa7ce..3771e5269 100644 --- a/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala +++ b/community/demo/src/main/scala/com/digitalasset/canton/demo/ReferenceDemoScript.scala @@ -189,7 +189,7 @@ class ReferenceDemoScript( name: String, connection: SequencerConnection, ): Unit = { - participant.domains.connect( + participant.domains.connect_by_config( DomainConnectionConfig( name, SequencerConnections.single(connection), @@ -639,13 +639,13 @@ object ReferenceDemoScript { val loggerFactory = consoleEnvironment.environment.loggerFactory // update domain parameters - banking.topology.domain_parameters.set_reconciliation_interval( + banking.topology.domain_parameters.propose_update( bankingDomainId, - config.PositiveDurationSeconds.ofSeconds(1), + _.update(reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1)), ) - medical.topology.domain_parameters.set_reconciliation_interval( + medical.topology.domain_parameters.propose_update( medicalDomainId, - config.PositiveDurationSeconds.ofSeconds(1), + _.update(reconciliationInterval = config.PositiveDurationSeconds.ofSeconds(1)), ) val script = new ReferenceDemoScript( diff --git a/community/domain/src/main/protobuf/buf.yaml b/community/domain/src/main/protobuf/buf.yaml index a2d816c30..f9151e12c 100644 --- a/community/domain/src/main/protobuf/buf.yaml +++ b/community/domain/src/main/protobuf/buf.yaml @@ -2,18 +2,21 @@ version: v1 build: excludes: - com/digitalasset/canton/domain/scalapb + - com/digitalasset/canton/mediator/scalapb + - com/digitalasset/canton/sequencer/scalapb breaking: ignore: - - com/digitalasset/canton/domain/admin/ + - com/digitalasset/canton/sequencer/admin/ + - com/digitalasset/canton/mediator/admin/ lint: ignore_only: # TODO(i16943) Remove coupling between pruning endpoints RPC_REQUEST_RESPONSE_UNIQUE: - - com/digitalasset/canton/domain/admin/v30/mediator_administration_service.proto - - 
com/digitalasset/canton/domain/admin/v30/sequencer_pruning_administration_service.proto + - com/digitalasset/canton/mediator/admin/v30/mediator_administration_service.proto + - com/digitalasset/canton/sequencer/admin/v30/sequencer_pruning_administration_service.proto RPC_REQUEST_STANDARD_NAME: - - com/digitalasset/canton/domain/admin/v30/mediator_administration_service.proto - - com/digitalasset/canton/domain/admin/v30/sequencer_pruning_administration_service.proto + - com/digitalasset/canton/mediator/admin/v30/mediator_administration_service.proto + - com/digitalasset/canton/sequencer/admin/v30/sequencer_pruning_administration_service.proto RPC_RESPONSE_STANDARD_NAME: - - com/digitalasset/canton/domain/admin/v30/mediator_administration_service.proto - - com/digitalasset/canton/domain/admin/v30/sequencer_pruning_administration_service.proto + - com/digitalasset/canton/mediator/admin/v30/mediator_administration_service.proto + - com/digitalasset/canton/sequencer/admin/v30/sequencer_pruning_administration_service.proto diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/mediator_administration_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/mediator_administration_service.proto similarity index 97% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/mediator_administration_service.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/mediator_administration_service.proto index 1b59a5b51..ba2fbd044 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/mediator_administration_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/mediator_administration_service.proto @@ -3,7 +3,7 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.mediator.admin.v30; import "com/digitalasset/canton/admin/pruning/v30/pruning.proto"; import "google/protobuf/timestamp.proto"; diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/mediator_initialization_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/mediator_initialization_service.proto similarity index 83% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/mediator_initialization_service.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/mediator_initialization_service.proto index 57dda5bb9..349455166 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/mediator_initialization_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/mediator_initialization_service.proto @@ -3,7 +3,7 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.mediator.admin.v30; import "com/digitalasset/canton/admin/domain/v30/sequencer_connection.proto"; import "com/digitalasset/canton/protocol/v30/sequencing.proto"; @@ -25,6 +25,9 @@ message InitializeMediatorRequest { // how should the member connect to the domain sequencer com.digitalasset.canton.admin.domain.v30.SequencerConnections sequencer_connections = 3; // required + + // how strictly we should validate the connection to the sequencer + com.digitalasset.canton.admin.domain.v30.SequencerConnectionValidation sequencer_connection_validation = 4; // required 
} message InitializeMediatorResponse {} diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_connection_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/sequencer_connection_service.proto similarity index 81% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_connection_service.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/sequencer_connection_service.proto index d6392d0ae..78e8bf17a 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_connection_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/admin/v30/sequencer_connection_service.proto @@ -3,12 +3,12 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.mediator.admin.v30; import "com/digitalasset/canton/admin/domain/v30/sequencer_connection.proto"; // service used by sequencer clients to manage connection to the sequencer -// used by the mediator and domain manager nodes +// used by the mediator service SequencerConnectionService { rpc GetConnection(GetConnectionRequest) returns (GetConnectionResponse); rpc SetConnection(SetConnectionRequest) returns (SetConnectionResponse); @@ -22,6 +22,7 @@ message GetConnectionResponse { message SetConnectionRequest { com.digitalasset.canton.admin.domain.v30.SequencerConnections sequencer_connections = 1; // required + com.digitalasset.canton.admin.domain.v30.SequencerConnectionValidation sequencer_connection_validation = 2; // required } message SetConnectionResponse {} diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/scalapb/package.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/scalapb/package.proto new file mode 100644 index 000000000..f67de52fe --- /dev/null +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/mediator/scalapb/package.proto @@ -0,0 +1,14 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +import "scalapb/scalapb.proto"; + +package com.digitalasset.canton.mediator; + +option (scalapb.options) = { + scope: PACKAGE + preserve_unknown_fields: false + no_default_values_in_constructor: true +}; diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_administration_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto similarity index 58% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_administration_service.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto index e232d26c6..b7c8d2d16 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_administration_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_administration_service.proto @@ -3,11 +3,14 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.sequencer.admin.v30; import "com/digitalasset/canton/admin/traffic/v30/member_traffic_status.proto"; -import "com/digitalasset/canton/domain/admin/v30/sequencer_initialization_snapshot.proto"; +import "com/digitalasset/canton/protocol/v30/sequencing.proto"; +import "com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_snapshot.proto"; +import "com/digitalasset/canton/topology/admin/v30/topology_ext.proto"; import "google/protobuf/timestamp.proto"; +import "scalapb/scalapb.proto"; // administration service for sequencer instances service SequencerAdministrationService { @@ -27,6 +30,14 @@ service SequencerAdministrationService { // fetch a snapshot of the sequencer state based on the given timestamp rpc Snapshot(SnapshotRequest) returns (SnapshotResponse); + // Fetch the onboarding state for a given sequencer. + // the returned bytestring can be used directly to initialize the given sequencer later on + rpc OnboardingState(OnboardingStateRequest) returns (OnboardingStateResponse); + + // Fetch the genesis state for a given sequencer. In the genesis state, we exclude VettedPackages transactions. + // the returned bytestring can be used directly to initialize the given sequencer later on + rpc GenesisState(GenesisStateRequest) returns (GenesisStateResponse); + // Disable members at the sequencer. Will prevent existing and new instances from connecting, and permit removing their data. 
rpc DisableMember(DisableMemberRequest) returns (DisableMemberResponse); } @@ -75,6 +86,55 @@ message SnapshotResponse { } } +message OnboardingStateRequest { + oneof request { + // The sequencer for which to fetch the onboarding state + string sequencer_id = 1; + // The effective time the should be "contained" in the sequencer snapshot + google.protobuf.Timestamp timestamp = 2; + } +} + +message OnboardingStateResponse { + message Success { + // versioned OnboardingStateForSequencer + bytes onboarding_state_for_sequencer = 1; + } + message Failure { + string reason = 1; + } + oneof value { + Success success = 1; + Failure failure = 2; + } +} +message GenesisStateRequest { + // Optional - the effective time used to fetch the topology transactions + google.protobuf.Timestamp timestamp = 1; +} + +message GenesisStateResponse { + message Success { + // versioned stored topology transactions + bytes genesis_state_for_sequencer = 1; + } + message Failure { + string reason = 1; + } + oneof value { + Success success = 1; + Failure failure = 2; + } +} + +message OnboardingStateForSequencer { + option (scalapb.message).companion_extends = "com.digitalasset.canton.version.UnstableProtoVersion"; + + com.digitalasset.canton.topology.admin.v30.TopologyTransactions topology_snapshot = 1; + com.digitalasset.canton.protocol.v30.StaticDomainParameters static_domain_parameters = 2; + SequencerSnapshot sequencer_snapshot = 3; +} + message PruningStatusRequest {} message PruningStatusResponse { diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_initialization_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto similarity index 63% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_initialization_service.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto index 1f86aba86..2a9336dd6 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_initialization_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_service.proto @@ -3,10 +3,9 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.sequencer.admin.v30; import "com/digitalasset/canton/protocol/v30/sequencing.proto"; -import "com/digitalasset/canton/topology/admin/v30/topology_ext.proto"; // Service allowing a Domain node to initialize the sequencer instance. // Should typically only be exposed to the domain node and not all clients @@ -17,39 +16,31 @@ service SequencerInitializationService { // and will immediately attempt to use it. // If the request is received after the sequencer has been successfully initialized it should return successfully // if the domain_id matches the domain that the sequencer has been initialized for, otherwise it should fail. 
- rpc InitializeSequencer(InitializeSequencerRequest) returns (InitializeSequencerResponse); - rpc InitializeSequencerVersioned(InitializeSequencerVersionedRequest) returns (InitializeSequencerVersionedResponse); + rpc InitializeSequencerFromGenesisState(InitializeSequencerFromGenesisStateRequest) returns (InitializeSequencerFromGenesisStateResponse); + rpc InitializeSequencerFromOnboardingState(InitializeSequencerFromOnboardingStateRequest) returns (InitializeSequencerFromOnboardingStateResponse); } // Includes sufficient detail for: // - the sequencer to generate its own key to return the public key to the domain node // - topology and additional bootstrap information -message InitializeSequencerRequest { +message InitializeSequencerFromGenesisStateRequest { // a topology snapshot up until (including) the point where this sequencer is becoming active on the domain // the topology snapshot will be persisted in the domain store of the sequencer - com.digitalasset.canton.topology.admin.v30.TopologyTransactions topology_snapshot = 1; + bytes topology_snapshot = 1; com.digitalasset.canton.protocol.v30.StaticDomainParameters domain_parameters = 2; - - // optional - snapshot to initialize sequencer from, as a versioned SequencerSnapshot of version v1 or higher - bytes snapshot = 3; } -message InitializeSequencerResponse { +message InitializeSequencerFromGenesisStateResponse { // Indicate if the initialized sequencer is replicated bool replicated = 1; } -message InitializeSequencerVersionedRequest { - bytes topology_snapshot = 1; - - com.digitalasset.canton.protocol.v30.StaticDomainParameters domain_parameters = 2; - - // optional - snapshot to initialize sequencer from, as a versioned SequencerSnapshot of version v1 or higher - bytes snapshot = 3; +message InitializeSequencerFromOnboardingStateRequest { + bytes onboarding_state = 1; } -message InitializeSequencerVersionedResponse { +message InitializeSequencerFromOnboardingStateResponse { // Indicate if the initialized sequencer is replicated bool replicated = 1; } diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_initialization_snapshot.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_snapshot.proto similarity index 98% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_initialization_snapshot.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_snapshot.proto index 8043113c2..d3a1e4bd7 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_initialization_snapshot.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_initialization_snapshot.proto @@ -3,7 +3,7 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.sequencer.admin.v30; import "com/digitalasset/canton/crypto/v30/crypto.proto"; import "com/digitalasset/canton/protocol/v30/sequencing.proto"; diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_pruning_administration_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_pruning_administration_service.proto similarity index 97% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_pruning_administration_service.proto rename to 
community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_pruning_administration_service.proto index 9b58f7bb8..74dedea6f 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_pruning_administration_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_pruning_administration_service.proto @@ -3,7 +3,7 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.sequencer.admin.v30; import "com/digitalasset/canton/admin/pruning/v30/pruning.proto"; import "google/protobuf/timestamp.proto"; diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_version_service.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_version_service.proto similarity index 94% rename from community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_version_service.proto rename to community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_version_service.proto index 370cca09f..ff56c28fe 100644 --- a/community/domain/src/main/protobuf/com/digitalasset/canton/domain/admin/v30/sequencer_version_service.proto +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/admin/v30/sequencer_version_service.proto @@ -3,7 +3,7 @@ syntax = "proto3"; -package com.digitalasset.canton.domain.admin.v30; +package com.digitalasset.canton.sequencer.admin.v30; import "com/digitalasset/canton/protocol/v30/sequencing.proto"; diff --git a/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/scalapb/package.proto b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/scalapb/package.proto new file mode 100644 index 000000000..52022da6a --- /dev/null +++ b/community/domain/src/main/protobuf/com/digitalasset/canton/sequencer/scalapb/package.proto @@ -0,0 +1,14 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +syntax = "proto3"; + +package com.digitalasset.canton.sequencer; + +import "scalapb/scalapb.proto"; + +option (scalapb.options) = { + scope: PACKAGE + preserve_unknown_fields: false + no_default_values_in_constructor: true +}; diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala index 172ffb2f0..ef7e9aca2 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockSequencerStateManager.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.domain.block -import cats.Monad +import cats.data.Nested import cats.syntax.functor.* import com.daml.error.BaseError import com.daml.nameof.NameOf.functionFullName @@ -13,10 +13,10 @@ import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.block import com.digitalasset.canton.domain.block.BlockSequencerStateManager.HeadState +import com.digitalasset.canton.domain.block.BlockUpdateGenerator.BlockChunk import com.digitalasset.canton.domain.block.data.{ BlockEphemeralState, BlockInfo, - BlockUpdateClosureWithHeight, EphemeralState, SequencerBlockStore, } @@ -26,25 +26,21 @@ import com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencer import com.digitalasset.canton.domain.sequencing.sequencer.errors.CreateSubscriptionError import com.digitalasset.canton.domain.sequencing.sequencer.traffic.SequencerRateLimitManager import com.digitalasset.canton.error.SequencerBaseError -import com.digitalasset.canton.lifecycle.{ - AsyncCloseable, - AsyncOrSyncCloseable, - CloseContext, - FlagCloseableAsync, - FutureUnlessShutdown, -} +import com.digitalasset.canton.lifecycle.{AsyncCloseable, AsyncOrSyncCloseable, FlagCloseableAsync} import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.pekkostreams.dispatcher.Dispatcher import com.digitalasset.canton.pekkostreams.dispatcher.SubSource.RangeSource import com.digitalasset.canton.sequencing.client.SequencerSubscriptionError import com.digitalasset.canton.topology.{DomainId, Member, SequencerId} import com.digitalasset.canton.tracing.{TraceContext, Traced} +import com.digitalasset.canton.util.PekkoUtil.syntax.* import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.{ErrorUtil, MapsUtil, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion -import org.apache.pekko.Done +import com.google.common.annotations.VisibleForTesting import org.apache.pekko.stream.KillSwitches -import org.apache.pekko.stream.scaladsl.Keep +import org.apache.pekko.stream.scaladsl.{Flow, Keep} +import org.apache.pekko.{Done, NotUsed} import java.util.concurrent.atomic.AtomicReference import scala.collection.concurrent.TrieMap @@ -76,23 +72,24 @@ trait BlockSequencerStateManagerBase extends FlagCloseableAsync { /** Check whether a member is currently enabled based on the latest state. 
*/ def isMemberEnabled(member: Member): Boolean - def handleBlock( - updateClosure: BlockUpdateClosureWithHeight - ): FutureUnlessShutdown[BlockEphemeralState] - - def handleLocalEvent( - event: BlockSequencer.LocalEvent - )(implicit traceContext: TraceContext): Future[Unit] + /** Flow to turn [[com.digitalasset.canton.domain.block.BlockEvents]] of one block + * into a series of [[com.digitalasset.canton.domain.block.OrderedBlockUpdate]]s + * that are to be persisted subsequently using [[applyBlockUpdate]]. + */ + def processBlock( + bug: BlockUpdateGenerator + ): Flow[BlockEvents, Traced[OrderedBlockUpdate], NotUsed] - def pruneLocalDatabase( - timestamp: CantonTimestamp - )(implicit traceContext: TraceContext): Future[Unit] + /** Persists the [[com.digitalasset.canton.domain.block.BlockUpdate]]s and completes the waiting RPC calls + * as necessary. + */ + def applyBlockUpdate: Flow[Traced[BlockUpdate], Traced[CantonTimestamp], NotUsed] /** Wait for a member to be disabled on the underlying ledger */ def waitForMemberToBeDisabled(member: Member): Future[Unit] /** Wait for the sequencer pruning request to have been processed and get the returned message */ - def waitForPruningToComplete(timestamp: CantonTimestamp): Future[String] + def waitForPruningToComplete(timestamp: CantonTimestamp): (Boolean, Future[Unit]) /** Wait for the member's acknowledgement to have been processed */ def waitForAcknowledgementToComplete(member: Member, timestamp: CantonTimestamp)(implicit @@ -115,15 +112,14 @@ class BlockSequencerStateManager( override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, rateLimitManager: SequencerRateLimitManager, -)(implicit executionContext: ExecutionContext, closeContext: CloseContext) +)(implicit executionContext: ExecutionContext) extends BlockSequencerStateManagerBase with NamedLogging { import BlockSequencerStateManager.* - private val memberRegistrationPromises = TrieMap[Member, Promise[CantonTimestamp]]() private val memberDisablementPromises = TrieMap[Member, Promise[Unit]]() - private val sequencerPruningPromises = TrieMap[CantonTimestamp, Promise[String]]() + private val sequencerPruningPromises = TrieMap[CantonTimestamp, Promise[Unit]]() private val memberAcknowledgementPromises = TrieMap[Member, NonEmpty[SortedMap[CantonTimestamp, Traced[Promise[Unit]]]]]() @@ -162,65 +158,125 @@ class BlockSequencerStateManager( headState.get().chunk.ephemeral.registeredMembers.contains(member) /** Check whether a member is currently enabled based on the latest state. 
*/ - override def isMemberEnabled(member: Member): Boolean = - headState.get().chunk.ephemeral.status.members.exists(s => s.enabled && s.member == member) + override def isMemberEnabled(member: Member): Boolean = { + val headStatus = headState.get().chunk.ephemeral.status + headStatus.membersMap.contains(member) && !headStatus.disabledMembers.contains(member) + } - override def handleBlock( - updateClosure: BlockUpdateClosureWithHeight - ): FutureUnlessShutdown[BlockEphemeralState] = { - implicit val traceContext: TraceContext = updateClosure.blockTraceContext - closeContext.context.performUnlessClosingUSF("handleBlock") { + override def processBlock( + bug: BlockUpdateGenerator + ): Flow[BlockEvents, Traced[OrderedBlockUpdate], NotUsed] = { + val head = getHeadState + val bugState = { + import TraceContext.Implicits.Empty.* + bug.internalStateFor(head.blockEphemeralState) + } + Flow[BlockEvents] + .via(checkBlockHeight(head.block.height)) + .via(chunkBlock(bug)) + .via(processChunk(bug)(bugState)) + } - val blockEphemeralState = { - headState.get().blockEphemeralState + private def checkBlockHeight( + initialHeight: Long + ): Flow[BlockEvents, Traced[BlockEvents], NotUsed] = + Flow[BlockEvents].statefulMapConcat(() => { + @SuppressWarnings(Array("org.wartremover.warts.Var")) + var currentBlockHeight = initialHeight + blockEvents => { + val height = blockEvents.height + + // TODO(M98 Tech-Debt Collection): consider validating that blocks with the same block height have the same contents + // Skipping blocks we have processed before. Can occur when the read-path flowable is re-started but not all blocks + // in the pipeline of the BlockSequencerStateManager have already been processed. + if (height <= currentBlockHeight) { + noTracingLogger.debug( + s"Skipping update with height $height since it was already processed. " + ) + Seq.empty + } else if ( + currentBlockHeight > block.UninitializedBlockHeight && height > currentBlockHeight + 1 + ) { + val msg = + s"Received block of height $height while the last processed block only had height $currentBlockHeight. " + + s"Expected to receive one block higher only." + noTracingLogger.error(msg) + throw new SequencerUnexpectedStateChange(msg) + } else { + implicit val traceContext: TraceContext = TraceContext.ofBatch(blockEvents.events)(logger) + // Set the current block height to the new block's height instead of + 1 of the previous value + // so that we support starting from an arbitrary block height + currentBlockHeight = height + Seq(Traced(blockEvents)) + } } - checkInvariantIfEnabled(blockEphemeralState) - val height = updateClosure.height - val lastBlockHeight = blockEphemeralState.latestBlock.height - - // TODO(M98 Tech-Debt Collection): consider validating that blocks with the same block height have the same contents - // Skipping blocks we have processed before. Can occur when the read-path flowable is re-started but not all blocks - // in the pipeline of the BlockSequencerStateManager have already been processed. - if (height <= lastBlockHeight) { - logger.debug(s"Skipping update with height $height since it was already processed. ")( - traceContext - ) - FutureUnlessShutdown.pure(blockEphemeralState) - } else if (lastBlockHeight > block.UninitializedBlockHeight && height > lastBlockHeight + 1) { - val msg = - s"Received block of height $height while the last processed block only had height $lastBlockHeight. " + - s"Expected to receive one block higher only." 
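The new checkBlockHeight stage above is a stateful stream element: blocks at or below the last processed height are dropped (re-delivery after a restart), a gap of more than one height is a fatal error, and otherwise the tracked height jumps to the block's height. A standalone sketch of that gating with Pekko's statefulMapConcat, assuming a plain (height, payload) pair instead of BlockEvents and eliding the UninitializedBlockHeight sentinel:

// Simplified, illustrative version of the height gate; types and names are assumptions.
import org.apache.pekko.NotUsed
import org.apache.pekko.stream.scaladsl.Flow

def gateByHeight[A](initialHeight: Long): Flow[(Long, A), (Long, A), NotUsed] =
  Flow[(Long, A)].statefulMapConcat { () =>
    var current = initialHeight
    block => {
      val (height, payload) = block
      if (height <= current) Nil // already processed, e.g. after the read path restarted
      else if (height > current + 1)
        throw new IllegalStateException(
          s"Received block of height $height while the last processed block only had height $current"
        )
      else {
        current = height // accept and track the new height
        List(height -> payload)
      }
    }
  }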
- logger.error(msg) - FutureUnlessShutdown.failed(new SequencerUnexpectedStateChange(msg)) - } else - updateClosure - .updateGenerator(blockEphemeralState) - .flatMap(handleUpdate) + }) + + private def chunkBlock( + bug: BlockUpdateGenerator + ): Flow[Traced[BlockEvents], Traced[BlockChunk], NotUsed] = + Flow[Traced[BlockEvents]].mapConcat(_.withTraceContext { implicit traceContext => blockEvents => + bug.chunkBlock(blockEvents).map(Traced(_)) + }) + + private def processChunk(bug: BlockUpdateGenerator)( + initialState: bug.InternalState + ): Flow[Traced[BlockChunk], Traced[OrderedBlockUpdate], NotUsed] = { + implicit val traceContext = TraceContext.empty + Flow[Traced[BlockChunk]].statefulMapAsyncUSAndDrain(initialState) { (state, tracedChunk) => + implicit val traceContext: TraceContext = tracedChunk.traceContext + tracedChunk.traverse(blockChunk => Nested(bug.processBlockChunk(state, blockChunk))).value } } - override def handleLocalEvent( - event: BlockSequencer.LocalEvent - )(implicit traceContext: TraceContext): Future[Unit] = event match { - case BlockSequencer.DisableMember(member) => locallyDisableMember(member) - case BlockSequencer.Prune(timestamp) => pruneLocalDatabase(timestamp) + override def applyBlockUpdate: Flow[Traced[BlockUpdate], Traced[CantonTimestamp], NotUsed] = { + implicit val traceContext = TraceContext.empty + Flow[Traced[BlockUpdate]].statefulMapAsync(getHeadState) { (priorHead, update) => + implicit val traceContext = update.traceContext + val fut = update.value match { + case LocalBlockUpdate(local) => + handleLocalEvent(priorHead, local)(TraceContext.todo) + case chunk: ChunkUpdate => + handleChunkUpdate(priorHead, chunk)(TraceContext.todo) + case complete: CompleteBlockUpdate => + handleComplete(priorHead, complete.block)(TraceContext.todo) + } + fut.map(newHead => newHead -> Traced(newHead.block.lastTs)) + } + } + + @VisibleForTesting + private[domain] def handleLocalEvent( + priorHead: HeadState, + event: BlockSequencer.LocalEvent, + )(implicit traceContext: TraceContext): Future[HeadState] = event match { + case BlockSequencer.DisableMember(member) => locallyDisableMember(priorHead, member) + case BlockSequencer.UpdateInitialMemberCounters(timestamp) => + updateInitialMemberCounters(timestamp).map { (_: Unit) => + // Pruning does not change the head state + priorHead + } } - override def pruneLocalDatabase( + private def updateInitialMemberCounters( timestamp: CantonTimestamp )(implicit traceContext: TraceContext): Future[Unit] = for { - msg <- store.prune(timestamp) initialCounters <- store.initialMemberCounters } yield { countersSupportedAfter.set(initialCounters) - resolveSequencerPruning(timestamp, msg) + resolveSequencerPruning(timestamp) } override def waitForMemberToBeDisabled(member: Member): Future[Unit] = memberDisablementPromises.getOrElseUpdate(member, Promise[Unit]()).future - override def waitForPruningToComplete(timestamp: CantonTimestamp): Future[String] = - sequencerPruningPromises.getOrElseUpdate(timestamp, Promise[String]()).future + override def waitForPruningToComplete(timestamp: CantonTimestamp): (Boolean, Future[Unit]) = { + val newPromise = Promise[Unit]() + val (isNew, promise) = sequencerPruningPromises + .putIfAbsent(timestamp, newPromise) + .fold((true, newPromise))(oldPromise => (false, oldPromise)) + (isNew, promise.future) + } override def waitForAcknowledgementToComplete(member: Member, timestamp: CantonTimestamp)(implicit traceContext: TraceContext @@ -309,8 +365,9 @@ class BlockSequencerStateManager( } private def 
locallyDisableMember( - member: Member - )(implicit traceContext: TraceContext): Future[Unit] = + priorHead: HeadState, + member: Member, + )(implicit traceContext: TraceContext): Future[HeadState] = store .partialBlockUpdate( newMembers = Map(), @@ -322,13 +379,12 @@ class BlockSequencerStateManager( ) .map { _ => import monocle.macros.syntax.lens.* - val head = getHeadState - val memebersMap = head.chunk.ephemeral.status.membersMap - val newHead = head - .focus(_.chunk.ephemeral.status.membersMap) - .replace(memebersMap.updated(member, memebersMap(member).copy(enabled = false))) - updateHeadState(head, newHead) + val newHead = priorHead + .focus(_.chunk.ephemeral.status.disabledMembers) + .modify(_ incl member) + updateHeadState(priorHead, newHead) resolveWaitingForMemberDisablement(member) + newHead } private def updateMemberCounterSupportedAfter(member: Member, counter: SequencerCounter)(implicit @@ -345,10 +401,9 @@ class BlockSequencerStateManager( }.discard ) - private def handleChunkUpdate(update: ChunkUpdate)(implicit + private def handleChunkUpdate(priorHead: HeadState, update: ChunkUpdate)(implicit batchTraceContext: TraceContext - ): Future[Unit] = { - val priorHead = headState.get() + ): Future[HeadState] = { val priorState = priorHead.chunk val chunkNumber = priorState.chunkNumber + 1 assert( @@ -374,7 +429,7 @@ class BlockSequencerStateManager( MapsUtil.mergeWith(_, _)((first, _) => first) ) firstSequencerCounterByMember.forall { case (member, firstSequencerCounter) => - priorState.ephemeral.heads.getOrElse(member, SequencerCounter.Genesis - 1L) == + priorState.ephemeral.headCounter(member).getOrElse(SequencerCounter.Genesis - 1L) == firstSequencerCounter - 1L } } @@ -387,9 +442,10 @@ class BlockSequencerStateManager( val lastTs = (update.signedEvents.view.flatMap(_.values.map(_.timestamp)) ++ update.newMembers.values).maxOption.getOrElse(priorState.lastTs) + val newState = ChunkState( chunkNumber, - update.state, + priorState.ephemeral.mergeBlockUpdateEphemeralState(update.state), lastTs, update.lastSequencerEventTimestamp.orElse(priorState.latestSequencerEventTimestamp), ) @@ -400,7 +456,7 @@ class BlockSequencerStateManager( newMembers = update.newMembers, events = update.signedEvents, acknowledgments = update.acknowledgements, - membersDisabled = update.membersDisabled, + membersDisabled = Seq.empty, inFlightAggregationUpdates = update.inFlightAggregationUpdates, update.state.trafficState, ) @@ -411,23 +467,14 @@ class BlockSequencerStateManager( case (member, tombstone) if tombstone.isTombstone => member -> tombstone.counter } ) { case (member, counter) => updateMemberCounterSupportedAfter(member, counter) } - _ <- // Advance the supported counters before we delete the data of the old counters - if (update.pruningRequests.nonEmpty) - store.initialMemberCounters.map(initial => countersSupportedAfter.set(initial)) - else Future.unit - _ <- Future.sequence(update.pruningRequests.map(_.withTraceContext { - pruneTraceContext => ts => - logger.debug("Performing sequencer pruning on local state")(pruneTraceContext) - store.prune(ts)(pruneTraceContext).map(resolveSequencerPruning(ts, _)) - })) } yield { // head state update must happen before member counters are updated // as otherwise, if we have a registration in between counter-signalling and head-state, // the dispatcher will be initialised with the old head state but not be notified about // a change. 
- updateHeadState(priorHead, priorHead.copy(chunk = newState)) + val newHead = priorHead.copy(chunk = newState) + updateHeadState(priorHead, newHead) signalMemberCountersToDispatchers(newState.ephemeral) - resolveWaitingForNewMembers(newState.ephemeral) resolveWaitingForMemberDisablement(newState.ephemeral) update.acknowledgements.foreach { case (member, timestamp) => resolveAcknowledgements(member, timestamp) @@ -435,69 +482,44 @@ class BlockSequencerStateManager( update.invalidAcknowledgements.foreach { case (member, timestamp, error) => invalidAcknowledgement(member, timestamp, error) } + newHead } } - private def handleUpdate(update: BlockUpdates)(implicit + private def handleComplete(priorHead: HeadState, newBlock: BlockInfo)(implicit blockTraceContext: TraceContext - ): FutureUnlessShutdown[BlockEphemeralState] = { - - def handleComplete(newBlock: BlockInfo): Future[BlockEphemeralState] = { - val priorHead = headState.get - val chunkState = priorHead.chunk - - assert( - chunkState.lastTs <= newBlock.lastTs, - s"The block's last timestamp must be at least the last timestamp of the last chunk", - ) - assert( - chunkState.latestSequencerEventTimestamp <= newBlock.latestSequencerEventTimestamp, - s"The block's latest topology client timestamp must be at least the last chunk's latest topology client timestamp", - ) - - val newState = BlockEphemeralState( - newBlock, - // We can expire the cached in-memory in-flight aggregations, - // but we must not expire the persisted aggregations - // because we still need them for computing a snapshot - chunkState.ephemeral.evictExpiredInFlightAggregations(newBlock.lastTs), - ) - checkInvariantIfEnabled(newState) - val newHead = HeadState.fullyProcessed(newState) - for { - _ <- store.finalizeBlockUpdate(newBlock) - } yield { - updateHeadState(priorHead, newHead) - // Use lastTs here under the following assumptions: - // 1. lastTs represents the timestamp of the last sequenced "send" event of the last block successfully processed - // Specifically, it is the last of the timestamps in the block passed to the rate limiter in the B.U.G for consumed and traffic updates methods. - // After setting safeForPruning to this timestamp, we will not be able to request balances from the balance manager prior to this timestamp. - // 2. This does not impose restrictions on the use of lastSequencerEventTimestamp when calling the rate limiter. - // Meaning it should be possible to use an old lastSequencerEventTimestamp when calling the rate limiter, even if it is older than lastTs here. - // If this changes, we we will need to use lastSequencerEventTimestamp here instead. - // 3. TODO(i15837): Under some HA failover scenarios, this may not be sufficient. Mainly because finalizeBlockUpdate above does not - // use synchronous commits for DB replicas. This has for consequence that theoretically a block could be finalized but not appear - // in the DB replica, while the pruning will be visible in the replica. This would lead the BUG to requesting balances for that block when - // reprocessing it, which would fail because the balances have been pruned. This needs to be considered when implementing HA for the BlockSequencer. 
- rateLimitManager.safeForPruning(newHead.block.lastTs) - newState - } - } + ): Future[HeadState] = { + val chunkState = priorHead.chunk + assert( + chunkState.lastTs <= newBlock.lastTs, + s"The block's last timestamp must be at least the last timestamp of the last chunk", + ) + assert( + chunkState.latestSequencerEventTimestamp <= newBlock.latestSequencerEventTimestamp, + s"The block's latest topology client timestamp must be at least the last chunk's latest topology client timestamp", + ) - def step( - updates: BlockUpdates - ): FutureUnlessShutdown[Either[BlockUpdates, BlockEphemeralState]] = { - updates match { - case PartialBlockUpdate(chunk, continuation) => - FutureUnlessShutdown - .outcomeF(handleChunkUpdate(chunk)) - .flatMap((_: Unit) => continuation.map(Left(_))) - case CompleteBlockUpdate(block) => - FutureUnlessShutdown.outcomeF(handleComplete(block)).map(Right(_)) - } + val newState = BlockEphemeralState(newBlock, chunkState.ephemeral) + checkInvariantIfEnabled(newState) + val newHead = HeadState.fullyProcessed(newState) + for { + _ <- store.finalizeBlockUpdate(newBlock) + } yield { + updateHeadState(priorHead, newHead) + // Use lastTs here under the following assumptions: + // 1. lastTs represents the timestamp of the last sequenced "send" event of the last block successfully processed + // Specifically, it is the last of the timestamps in the block passed to the rate limiter in the B.U.G for consumed and traffic updates methods. + // After setting safeForPruning to this timestamp, we will not be able to request balances from the balance manager prior to this timestamp. + // 2. This does not impose restrictions on the use of lastSequencerEventTimestamp when calling the rate limiter. + // Meaning it should be possible to use an old lastSequencerEventTimestamp when calling the rate limiter, even if it is older than lastTs here. + // If this changes, we will need to use lastSequencerEventTimestamp here instead. + // 3. TODO(i15837): Under some HA failover scenarios, this may not be sufficient. Mainly because finalizeBlockUpdate above does not + // use synchronous commits for DB replicas. As a consequence, a block could theoretically be finalized but not appear + // in the DB replica, while the pruning will be visible in the replica. This would lead the BUG to request balances for that block when + // reprocessing it, which would fail because the balances have been pruned. This needs to be considered when implementing HA for the BlockSequencer. + rateLimitManager.safeForPruning(newHead.block.lastTs) + newHead } - - Monad[FutureUnlessShutdown].tailRecM(update)(step) } private def updateHeadState(prior: HeadState, next: HeadState)(implicit @@ -516,19 +538,8 @@ class BlockSequencerStateManager( newState: EphemeralState ): Unit = { dispatchers.toList.foreach { case (member, dispatcher) => - newState.heads.get(member) match { - case Some(counter) => - dispatcher.signalNewHead(counter + 1L) - case None => - } - } - } - - private def resolveWaitingForNewMembers(newState: EphemeralState): Unit = { - // if any members that we're waiting to see are now registered members, complete those promises. 
- newState.status.members foreach { registeredMember => - memberRegistrationPromises.remove(registeredMember.member) foreach { promise => - promise.success(registeredMember.registeredAt) + newState.headCounter(member).foreach { counter => + dispatcher.signalNewHead(counter + 1L) } } } @@ -536,16 +547,15 @@ class BlockSequencerStateManager( private def resolveWaitingForMemberDisablement(newState: EphemeralState): Unit = { // if any members that we're waiting to see disabled are now disabled members, complete those promises. memberDisablementPromises.keys - .map(newState.status.membersMap(_)) - .filterNot(_.enabled) - .map(_.member) foreach (resolveWaitingForMemberDisablement) + .filter(newState.status.disabledMembers.contains) + .foreach(resolveWaitingForMemberDisablement) } private def resolveWaitingForMemberDisablement(disabledMember: Member): Unit = memberDisablementPromises.remove(disabledMember) foreach { promise => promise.success(()) } - private def resolveSequencerPruning(timestamp: CantonTimestamp, pruningMsg: String): Unit = { - sequencerPruningPromises.remove(timestamp) foreach { promise => promise.success(pruningMsg) } + private def resolveSequencerPruning(timestamp: CantonTimestamp): Unit = { + sequencerPruningPromises.remove(timestamp) foreach { promise => promise.success(()) } } /** Resolves all outstanding acknowledgements up to the given timestamp. @@ -638,7 +648,7 @@ class BlockSequencerStateManager( // and then add 1 to the counter head when setting the new dispatcher index // So empty means we pass Genesis counter (0), everything else is counter + 1 (so the first msg is signalled 1) val head = - headState.get().chunk.ephemeral.heads.get(member) match { + headState.get().chunk.ephemeral.headCounter(member) match { case Some(counter) => logger.debug( s"Creating dispatcher for [$member] from head=${counter + 1L}" @@ -668,7 +678,7 @@ class BlockSequencerStateManager( // the order is: updateHead, signalDispatchers // the race is therefore makeDispatcher, updateHead, signalDispatchers, update dispatchers // to avoid a blocking lock, we just poke the newly generated dispatcher if necessary - headState.get().chunk.ephemeral.heads.get(member).foreach { counter => + headState.get().chunk.ephemeral.headCounter(member).foreach { counter => if (dispatcher.getHead() != counter + 1) { dispatcher.signalNewHead(counter + 1) } @@ -696,7 +706,6 @@ object BlockSequencerStateManager { )(implicit executionContext: ExecutionContext, traceContext: TraceContext, - closeContext: CloseContext, ): Future[BlockSequencerStateManager] = for { counters <- store.initialMemberCounters diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdates.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdate.scala similarity index 72% rename from community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdates.scala rename to community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdate.scala index dde89807e..10a482c6a 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdates.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdate.scala @@ -9,31 +9,17 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.SequencerCounter import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.block.BlockUpdateGenerator.SignedEvents -import com.digitalasset.canton.domain.block.data.{BlockInfo, EphemeralState} 
+import com.digitalasset.canton.domain.block.data.{BlockInfo, BlockUpdateEphemeralState} import com.digitalasset.canton.domain.sequencing.sequencer.InFlightAggregationUpdates -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown +import com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencer.LocalEvent import com.digitalasset.canton.topology.Member -import com.digitalasset.canton.tracing.Traced import com.digitalasset.canton.util.MapsUtil -/** A series of changes from processing the chunks of updates within a block. */ -sealed trait BlockUpdates extends Product with Serializable +/** Summarizes the updates that are to be persisted and signalled individually */ +sealed trait BlockUpdate extends Product with Serializable -/** A chunk of updates within a block. The updates can be delivered to - * [[com.digitalasset.canton.sequencing.client.SequencerClient]]s immediately, - * before fully processing the block. - * - * The next partial block update may depend on the events in the current chunk, - * e.g., by the topology processor processing them via its sequencer client subscription. - * For this reason, the next partial block update is wrapped in its own future, - * which can sync the topology updates via the topology client. - * - * @param continuation Computes the remainder of updates in a given block - */ -final case class PartialBlockUpdate( - chunk: ChunkUpdate, - continuation: FutureUnlessShutdown[BlockUpdates], -) extends BlockUpdates +/** Denotes an update that is generated from a block that went through ordering */ +sealed trait OrderedBlockUpdate extends BlockUpdate /** Signals that all updates in a block have been delivered as chunks. * The [[com.digitalasset.canton.domain.block.data.BlockInfo]] must be consistent with @@ -44,11 +30,9 @@ final case class PartialBlockUpdate( * must be at least the one from the last chunk or previous block. * - [[com.digitalasset.canton.domain.block.data.BlockInfo.height]] must be exactly one higher * than the previous block - * The consistency conditions are checked in `handleUpdate` + * The consistency conditions are checked in [[com.digitalasset.canton.domain.block.BlockSequencerStateManager]]'s `handleComplete`. */ -final case class CompleteBlockUpdate( - block: BlockInfo -) extends BlockUpdates +final case class CompleteBlockUpdate(block: BlockInfo) extends OrderedBlockUpdate /** Changes from processing a consecutive part of updates within a block from the blockchain. * We expect all values to be consistent with one another: @@ -59,27 +43,23 @@ final case class CompleteBlockUpdate( * - counter values for each member should be continuous * * @param newMembers Members that were added along with the timestamp that they are considered registered from. - * @param membersDisabled Members that were disabled. * @param acknowledgements The highest valid acknowledged timestamp for each member in the block. * @param invalidAcknowledgements All invalid acknowledgement timestamps in the block for each member. * @param signedEvents New sequenced events for members. * @param inFlightAggregationUpdates The updates to the in-flight aggregation states. - * Does not include the clean-up of expired aggregations. - * @param pruningRequests Upper bound timestamps to prune the sequencer's local state. + * Includes the clean-up of expired aggregations. * @param lastSequencerEventTimestamp The highest timestamp of an event in `events` addressed to the sequencer, if any. 
* @param state Updated ephemeral state to be used for processing subsequent chunks. */ final case class ChunkUpdate( newMembers: Map[Member, CantonTimestamp] = Map.empty, - membersDisabled: Seq[Member] = Seq.empty, acknowledgements: Map[Member, CantonTimestamp] = Map.empty, invalidAcknowledgements: Seq[(Member, CantonTimestamp, BaseError)] = Seq.empty, signedEvents: Seq[SignedEvents] = Seq.empty, inFlightAggregationUpdates: InFlightAggregationUpdates = Map.empty, - pruningRequests: Seq[Traced[CantonTimestamp]] = Seq.empty, lastSequencerEventTimestamp: Option[CantonTimestamp], - state: EphemeralState, -) { + state: BlockUpdateEphemeralState, +) extends OrderedBlockUpdate { // ensure that all new members appear in the ephemeral state require( newMembers.keys.forall(state.registeredMembers.contains), @@ -113,3 +93,6 @@ final case class ChunkUpdate( } // The other consistency conditions are checked in `BlockSequencerStateManager.handleChunkUpdate` } + +/** Denotes an update to the persisted state that is caused by a local event that has not gone through ordering */ +final case class LocalBlockUpdate(local: LocalEvent) extends BlockUpdate diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdateGenerator.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdateGenerator.scala index 80c0e0ccc..ec0478baa 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdateGenerator.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/BlockUpdateGenerator.scala @@ -15,6 +15,7 @@ import cats.syntax.traverse.* import com.daml.error.BaseError import com.daml.nonempty.catsinstances.* import com.daml.nonempty.{NonEmpty, NonEmptyUtil} +import com.digitalasset.canton.SequencerCounter import com.digitalasset.canton.config.CantonRequireTypes.String73 import com.digitalasset.canton.crypto.{ DomainSyncCryptoClient, @@ -23,12 +24,12 @@ import com.digitalasset.canton.crypto.{ SyncCryptoClient, } import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.block.BlockUpdateGenerator.BlockChunk import com.digitalasset.canton.domain.block.LedgerBlockEvent.* import com.digitalasset.canton.domain.block.data.{ BlockEphemeralState, BlockInfo, - BlockUpdateClosureWithHeight, - EphemeralState, + BlockUpdateEphemeralState, } import com.digitalasset.canton.domain.sequencing.sequencer.InFlightAggregation.AggregationBySender import com.digitalasset.canton.domain.sequencing.sequencer.* @@ -41,7 +42,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.traffic.{ SequencerRateLimitManager, } import com.digitalasset.canton.error.BaseAlarm -import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown, UnlessShutdown} +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.sequencing.OrdinarySerializedEvent import com.digitalasset.canton.sequencing.client.SequencedEventValidator @@ -52,25 +53,22 @@ import com.digitalasset.canton.topology.{DomainId, Member, PartyId, SequencerId} import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.util.Thereafter.syntax.* -import com.digitalasset.canton.util.{EitherTUtil, ErrorUtil, MapsUtil, MonadUtil, SeqUtil} +import 
com.digitalasset.canton.util.{EitherTUtil, ErrorUtil, IterableUtil, MapsUtil, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{SequencerCounter, checked} import monocle.macros.syntax.lens.* +import scala.collection.immutable import scala.concurrent.{ExecutionContext, Future} -import scala.util.Success /** Exposes functions that take the deserialized contents of a block from a blockchain integration - * and computes the new [[com.digitalasset.canton.domain.block.BlockUpdates]]. - * Used by Ethereum and Fabric integrations. + * and computes the new [[com.digitalasset.canton.domain.block.BlockUpdate]]s. * * In particular, these functions are responsible for the final timestamp assignment of a given submission request. * The timestamp assignment works as follows: * 1. an initial timestamp is assigned to the submission request by the sequencer that writes it to the ledger * 2. each sequencer that reads the block potentially adapts the previously assigned timestamp * deterministically via `ensureStrictlyIncreasingTimestamp` - * 3. this timestamp is used to compute the [[com.digitalasset.canton.domain.block.BlockUpdates]] + * 3. this timestamp is used to compute the [[com.digitalasset.canton.domain.block.BlockUpdate]]s * * Reasoning: * Step 1 is done so that every sequencer sees the same timestamp for a given event. @@ -81,7 +79,37 @@ import scala.util.Success * For step 2, we assume that every sequencer observes the same stream of events from the underlying ledger * (and especially that events are always read in the same order). */ -class BlockUpdateGenerator( +trait BlockUpdateGenerator { + type InternalState + + def internalStateFor(state: BlockEphemeralState): InternalState + + def extractBlockEvents(block: RawLedgerBlock): BlockEvents + + def chunkBlock(block: BlockEvents)(implicit + traceContext: TraceContext + ): immutable.Iterable[BlockChunk] + + def processBlockChunk(state: InternalState, chunk: BlockChunk)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[(InternalState, OrderedBlockUpdate)] +} + +object BlockUpdateGenerator { + + type SignedEvents = NonEmpty[Map[Member, OrdinarySerializedEvent]] + + sealed trait BlockChunk extends Product with Serializable + final case class NextChunk( + blockHeight: Long, + chunkIndex: Int, + events: NonEmpty[Seq[Traced[LedgerBlockEvent]]], + ) extends BlockChunk + final case class EndOfBlock(blockHeight: Long) extends BlockChunk +} + +class BlockUpdateGeneratorImpl( domainId: DomainId, protocolVersion: ProtocolVersion, domainSyncCryptoApi: DomainSyncCryptoClient, @@ -91,13 +119,13 @@ class BlockUpdateGenerator( orderingTimeFixMode: OrderingTimeFixMode, protected val loggerFactory: NamedLoggerFactory, )(implicit val closeContext: CloseContext) - extends NamedLogging { + extends BlockUpdateGenerator + with NamedLogging { import com.digitalasset.canton.domain.block.BlockUpdateGenerator.* + import com.digitalasset.canton.domain.block.BlockUpdateGeneratorImpl.* - def asBlockUpdate(block: RawLedgerBlock)(implicit - executionContext: ExecutionContext - ): BlockUpdateClosureWithHeight = { + override def extractBlockEvents(block: RawLedgerBlock): BlockEvents = { val ledgerBlockEvents = block.events.mapFilter { tracedEvent => implicit val traceContext: TraceContext = tracedEvent.traceContext LedgerBlockEvent.fromRawBlockEvent(protocolVersion)(tracedEvent.value) match { @@ -108,97 +136,78 @@ class BlockUpdateGenerator( Some(Traced(value)) } } - 
asBlockUpdate(BlockEvents(block.blockHeight, ledgerBlockEvents)) - } - - def asBlockUpdate(blockContents: BlockEvents)(implicit - executionContext: ExecutionContext - ): BlockUpdateClosureWithHeight = { - implicit val blockTraceContext: TraceContext = - TraceContext.ofBatch(blockContents.events)(logger) - BlockUpdateClosureWithHeight( - blockContents.height, - processEvents(blockContents), - blockTraceContext, - ) + BlockEvents(block.blockHeight, ledgerBlockEvents) } - private case class State( - lastTs: CantonTimestamp, + override type InternalState = State + private[block] case class State( + lastBlockTs: CantonTimestamp, + lastChunkTs: CantonTimestamp, latestSequencerEventTimestamp: Option[CantonTimestamp], - ephemeral: EphemeralState, + ephemeral: BlockUpdateEphemeralState, ) - private def processEvents(blockEvents: BlockEvents)( - blockState: BlockEphemeralState - )(implicit - traceContext: TraceContext, - executionContext: ExecutionContext, - ): FutureUnlessShutdown[BlockUpdates] = { - val lastBlockTs = blockState.latestBlock.lastTs - val lastBlockSafePruning = - blockState.state.status.safePruningTimestampFor(CantonTimestamp.MaxValue) - val BlockEvents(height, tracedEvents) = blockEvents - - val chunks = splitAfterEnvelopesForSequencer(tracedEvents) - logger.debug(s"Splitting block $height into ${chunks.length} chunks") - val iter = chunks.iterator - - def go(state: State): FutureUnlessShutdown[BlockUpdates] = { - if (iter.hasNext) { - val nextChunk = iter.next() - processChunk(height, lastBlockTs, lastBlockSafePruning, state, nextChunk).map { - case (chunkUpdate, nextState) => - PartialBlockUpdate(chunkUpdate, go(nextState)) - } - } else { - val block = BlockInfo(height, state.lastTs, state.latestSequencerEventTimestamp) - FutureUnlessShutdown.pure(CompleteBlockUpdate(block)) - } - } - - val initialState = State( - blockState.latestBlock.lastTs, - blockState.latestBlock.latestSequencerEventTimestamp, - blockState.state, - ) - go(initialState) - } + override def internalStateFor(state: BlockEphemeralState): InternalState = State( + lastBlockTs = state.latestBlock.lastTs, + lastChunkTs = state.latestBlock.lastTs, + latestSequencerEventTimestamp = state.latestBlock.latestSequencerEventTimestamp, + ephemeral = state.state.toBlockUpdateEphemeralState, + ) - private def splitAfterEnvelopesForSequencer( - blockEvents: Seq[Traced[LedgerBlockEvent]] - ): Seq[NonEmpty[Seq[Traced[LedgerBlockEvent]]]] = { + override def chunkBlock( + block: BlockEvents + )(implicit traceContext: TraceContext): immutable.Iterable[BlockChunk] = { // We must start a new chunk whenever the chunk processing advances lastSequencerEventTimestamp - // Otherwise the logic for retrieving a topology snapshot could deadlock + // Otherwise the logic for retrieving a topology snapshot or traffic state could deadlock def possibleEventToThisSequencer(event: LedgerBlockEvent): Boolean = event match { case Send(_, signedSubmissionRequest) => val allRecipients = signedSubmissionRequest.content.batch.allRecipients allRecipients.contains(AllMembersOfDomain) || - allRecipients.contains(MemberRecipient(sequencerId)) || allRecipients.contains(SequencersOfDomain) case _ => false } - SeqUtil.splitAfter(blockEvents)(event => possibleEventToThisSequencer(event.value)) + val blockHeight = block.height + + IterableUtil + .splitAfter(block.events)(event => possibleEventToThisSequencer(event.value)) + .zipWithIndex + .map { case (events, index) => + NextChunk(blockHeight, index, events) + } ++ Seq(EndOfBlock(blockHeight)) + } + + override 
final def processBlockChunk(state: InternalState, chunk: BlockChunk)(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): FutureUnlessShutdown[(InternalState, OrderedBlockUpdate)] = { + chunk match { + case EndOfBlock(height) => + val newState = state.copy(lastBlockTs = state.lastChunkTs) + val update = CompleteBlockUpdate( + BlockInfo(height, state.lastChunkTs, state.latestSequencerEventTimestamp) + ) + FutureUnlessShutdown.pure(newState -> update) + case NextChunk(height, index, events) => + processChunk(height, state, events) + } } private def processChunk( height: Long, - lastBlockTs: CantonTimestamp, - lastBlockSafePruning: CantonTimestamp, state: State, chunk: NonEmpty[Seq[Traced[LedgerBlockEvent]]], )(implicit ec: ExecutionContext, traceContext: TraceContext, - ): FutureUnlessShutdown[(ChunkUpdate, State)] = { + ): FutureUnlessShutdown[(State, ChunkUpdate)] = { val (lastTs, revFixedTsChanges) = // With this logic, we assign to the initial non-Send events the same timestamp as for the last // block. This means that we will include these events in the ephemeral state of the previous block // when we re-read it from the database. But this doesn't matter given that all those events are idempotent. chunk.forgetNE.foldLeft[ (CantonTimestamp, Seq[(CantonTimestamp, Traced[LedgerBlockEvent])]), - ]((state.lastTs, Seq.empty)) { case ((lastTs, events), event) => + ]((state.lastChunkTs, Seq.empty)) { case ((lastTs, events), event) => event.value match { case send: LedgerBlockEvent.Send => val ts = ensureStrictlyIncreasingTimestamp(lastTs, send.timestamp) @@ -215,57 +224,22 @@ class BlockUpdateGenerator( } val fixedTsChanges: Seq[(CantonTimestamp, Traced[LedgerBlockEvent])] = revFixedTsChanges.reverse - val membersToDisable = fixedTsChanges.collect { case (_, Traced(DisableMember(member))) => - member - } val submissionRequests = fixedTsChanges.collect { case (ts, ev @ Traced(sendEvent: Send)) => // Discard the timestamp of the `Send` event as this one is obsolete (ts, ev.map(_ => sendEvent.signedSubmissionRequest)) } - /* Pruning requests should only specify pruning timestamps that were safe at the time - * they were submitted for sequencing. A safe pruning timestamp never becomes unsafe, - * so it should still be safe. We nevertheless double-check this here and error on unsafe pruning requests. - * Since the safe pruning timestamp is before or at the last acknowledgement of each enabled member, - * and both acknowledgements and member enabling/disabling take effect only when they are part of a block, - * the safe pruning timestamp must be at most the last event of the previous block. - * If it is later, then the sequencer node that submitted the pruning request is buggy - * and it is better to crash. - */ - // TODO(M99) Gracefully deal with buggy sequencer nodes - val safePruningTimestamp = lastBlockSafePruning.min(lastBlockTs) - val allPruneRequests = fixedTsChanges.collect { case (_, traced @ Traced(Prune(ts))) => - Traced(ts)(traced.traceContext) - } - val (pruneRequests, invalidPruneRequests) = allPruneRequests.partition( - _.value <= safePruningTimestamp - ) - if (invalidPruneRequests.nonEmpty) { - invalidPruneRequests.foreach(_.withTraceContext { implicit traceContext => pruneTimestamp => - logger.error( - s"Unsafe pruning request at $pruneTimestamp. 
The latest safe pruning timestamp is $safePruningTimestamp for block $height" - ) - }) - val alarm = SequencerError.InvalidPruningRequestOnChain.Error( - height, - lastBlockTs, - lastBlockSafePruning, - invalidPruneRequests.map(_.value), - ) - throw alarm.asGrpcError - } - for { submissionRequestsWithSnapshots <- addSnapshots( state.latestSequencerEventTimestamp, - state.ephemeral.heads.get(sequencerId), + state.ephemeral.headCounter(sequencerId), submissionRequests, ) newMembers <- detectMembersWithoutSequencerCounters(submissionRequestsWithSnapshots, state) _ = if (newMembers.nonEmpty) { logger.info(s"Detected new members without sequencer counter: $newMembers") } - validatedAcks <- processAcknowledgements(lastBlockTs, state, fixedTsChanges) + validatedAcks <- processAcknowledgements(state, fixedTsChanges) (acksByMember, invalidAcks) = validatedAcks // Warn if we use an approximate snapshot but only after we've read at least one warnIfApproximate = state.ephemeral.headCounterAboveGenesis(sequencerId) @@ -309,32 +283,27 @@ class BlockUpdateGenerator( } else FutureUnlessShutdown.pure(Map.empty) stateWithNewMembers = { val newMemberStatus = newMembers.map { case (member, ts) => - member -> SequencerMemberStatus(member, ts, None) + member -> InternalSequencerMemberStatus(ts, None) } - val membersWithDisablements = - membersToDisable.foldLeft(state.ephemeral.status.membersMap ++ newMemberStatus) { - case (membersMap, memberToDisable) => - membersMap.updated(memberToDisable, membersMap(memberToDisable).copy(enabled = false)) - } - - val newMembersWithDisablementsAndAcknowledgements = - acksByMember.foldLeft(membersWithDisablements) { case (membersMap, (member, timestamp)) => - membersMap - .get(member) - .fold { - logger.debug( - s"Ack at $timestamp for $member being ignored because the member has not yet been registered." - ) - membersMap - } { memberStatus => - membersMap.updated(member, memberStatus.copy(lastAcknowledged = Some(timestamp))) - } + val newMembersWithAcknowledgements = + acksByMember.foldLeft(state.ephemeral.membersMap ++ newMemberStatus) { + case (membersMap, (member, timestamp)) => + membersMap + .get(member) + .fold { + logger.debug( + s"Ack at $timestamp for $member being ignored because the member has not yet been registered." 
+ ) + membersMap + } { memberStatus => + membersMap.updated(member, memberStatus.copy(lastAcknowledged = Some(timestamp))) + } } state - .focus(_.ephemeral.status.membersMap) - .replace(newMembersWithDisablementsAndAcknowledgements) + .focus(_.ephemeral.membersMap) + .replace(newMembersWithAcknowledgements) .focus(_.ephemeral.trafficState) .modify(_ ++ newMembersTraffic) } @@ -343,33 +312,35 @@ class BlockUpdateGenerator( Seq.empty[SignedEvents], Map.empty[AggregationId, InFlightAggregationUpdate], stateWithNewMembers.ephemeral, + Option.empty[CantonTimestamp], ), submissionRequestsWithSnapshots, )(validateSubmissionRequestAndAddEvents(height, state.latestSequencerEventTimestamp)) - } yield result match { - case (reversedSignedEvents, inFlightAggregationUpdates, finalEphemeralState) => - val lastSequencerEventTimestamp: Option[CantonTimestamp] = - reversedSignedEvents.iterator.collectFirst { - case memberEvents if memberEvents.contains(sequencerId) => - checked(memberEvents(sequencerId)).timestamp - } - val chunkUpdate = ChunkUpdate( - newMembers, - membersToDisable, - acksByMember, - invalidAcks, - reversedSignedEvents.reverse, - inFlightAggregationUpdates, - pruneRequests, - lastSequencerEventTimestamp, - finalEphemeralState, - ) - val newState = State( - lastTs, - lastSequencerEventTimestamp.orElse(state.latestSequencerEventTimestamp), - finalEphemeralState, - ) - (chunkUpdate, newState) + } yield { + val ( + reversedSignedEvents, + inFlightAggregationUpdates, + finalEphemeralState, + lastSequencerEventTimestamp, + ) = result + val finalEphemeralStateWithAggregationExpiry = + finalEphemeralState.evictExpiredInFlightAggregations(lastTs) + val chunkUpdate = ChunkUpdate( + newMembers, + acksByMember, + invalidAcks, + reversedSignedEvents.reverse, + inFlightAggregationUpdates, + lastSequencerEventTimestamp, + finalEphemeralStateWithAggregationExpiry, + ) + val newState = State( + state.lastBlockTs, + lastTs, + lastSequencerEventTimestamp.orElse(state.latestSequencerEventTimestamp), + finalEphemeralStateWithAggregationExpiry, + ) + (newState, chunkUpdate) } } @@ -473,7 +444,6 @@ class BlockUpdateGenerator( } private def processAcknowledgements( - lastBlockTs: CantonTimestamp, state: State, fixedTsChanges: Seq[(CantonTimestamp, Traced[LedgerBlockEvent])], )(implicit @@ -485,7 +455,7 @@ class BlockUpdateGenerator( for { snapshot <- SyncCryptoClient.getSnapshotForTimestampUS( domainSyncCryptoApi, - lastBlockTs, + state.lastBlockTs, state.latestSequencerEventTimestamp, protocolVersion, warnIfApproximate = false, @@ -496,14 +466,14 @@ class BlockUpdateGenerator( (goodTsAcks, futureAcks) = allAcknowledgements.partition { tracedSignedAck => // Intentionally use the previous block's last timestamp // such that the criterion does not depend on how the block events are chunked up. 
- tracedSignedAck.value.content.timestamp <= lastBlockTs + tracedSignedAck.value.content.timestamp <= state.lastBlockTs } invalidTsAcks = futureAcks.map(_.withTraceContext { implicit traceContext => signedAck => val ack = signedAck.content val member = ack.member val timestamp = ack.timestamp val error = - SequencerError.InvalidAcknowledgementTimestamp.Error(member, timestamp, lastBlockTs) + SequencerError.InvalidAcknowledgementTimestamp.Error(member, timestamp, state.lastBlockTs) (member, timestamp, error: BaseError) }) sigChecks <- FutureUnlessShutdown.outcomeF(Future.sequence(goodTsAcks.map(_.withTraceContext { @@ -520,7 +490,7 @@ class BlockUpdateGenerator( ack.member, ack.timestamp, SequencerError.InvalidAcknowledgementSignature - .Error(signedAck, lastBlockTs, e): BaseError, + .Error(signedAck, state.lastBlockTs, e): BaseError, ) ) .map(_ => (ack.member, ack.timestamp)) @@ -542,12 +512,24 @@ class BlockUpdateGenerator( height: Long, latestSequencerEventTimestamp: Option[CantonTimestamp], )( - acc: (Seq[SignedEvents], InFlightAggregationUpdates, EphemeralState), + acc: ( + Seq[SignedEvents], + InFlightAggregationUpdates, + BlockUpdateEphemeralState, + Option[CantonTimestamp], + ), sequencedSubmissionRequest: SequencedSubmission, )(implicit ec: ExecutionContext - ): FutureUnlessShutdown[(Seq[SignedEvents], InFlightAggregationUpdates, EphemeralState)] = { - val (reversedEvents, inFlightAggregationUpdates, stFromAcc) = acc + ): FutureUnlessShutdown[ + ( + Seq[SignedEvents], + InFlightAggregationUpdates, + BlockUpdateEphemeralState, + Option[CantonTimestamp], + ) + ] = { + val (reversedEvents, inFlightAggregationUpdates, stFromAcc, sequencerEventTimestampSoFarO) = acc val SequencedSubmission( sequencingTimestamp, signedSubmissionRequest, @@ -556,14 +538,21 @@ class BlockUpdateGenerator( ) = sequencedSubmissionRequest implicit val traceContext = sequencedSubmissionRequest.traceContext + ErrorUtil.requireState( + sequencerEventTimestampSoFarO.isEmpty, + "Only the last event in a chunk could be addressed to the sequencer", + ) + def processSubmissionOutcome( - st: EphemeralState, + st: BlockUpdateEphemeralState, outcome: SubmissionRequestOutcome, + sequencerEventTimestampO: Option[CantonTimestamp], ): FutureUnlessShutdown[ ( Seq[SignedEvents], InFlightAggregationUpdates, - EphemeralState, + BlockUpdateEphemeralState, + Option[CantonTimestamp], ) ] = outcome match { case SubmissionRequestOutcome(deliverEvents, newAggregationO, signingSnapshotO) => @@ -588,8 +577,7 @@ class BlockUpdateGenerator( )(_ tryMerge _) } val newState = - st.copy(inFlightAggregations = newInFlightAggregations) - .copy(checkpoints = newCheckpoints) + st.copy(inFlightAggregations = newInFlightAggregations, checkpoints = newCheckpoints) // If we haven't yet computed a snapshot for signing, // we now get one for the sequencing timestamp @@ -618,6 +606,7 @@ class BlockUpdateGenerator( signedEvents +: reversedEvents, newInFlightAggregationUpdates, trafficUpdatedState, + sequencerEventTimestampO, ) } } @@ -631,8 +620,8 @@ class BlockUpdateGenerator( signingSnapshotO, latestSequencerEventTimestamp, ) - (newState, outcome) = newStateAndOutcome - result <- processSubmissionOutcome(newState, outcome) + (newState, outcome, sequencerEventTimestampO) = newStateAndOutcome + result <- processSubmissionOutcome(newState, outcome, sequencerEventTimestampO) } yield { logger.debug( s"At block $height, the submission request ${signedSubmissionRequest.content.messageId} at $sequencingTimestamp created the following counters: \n" ++ 
outcome.eventsByMember @@ -675,7 +664,7 @@ class BlockUpdateGenerator( submissionRequest: SubmissionRequest, topologySnapshot: SyncCryptoApi, sequencingTimestamp: CantonTimestamp, - st: EphemeralState, + st: BlockUpdateEphemeralState, )(implicit traceContext: TraceContext, ec: ExecutionContext, @@ -728,14 +717,16 @@ class BlockUpdateGenerator( private def validateAndGenerateSequencedEvents( sequencingTimestamp: CantonTimestamp, signedSubmissionRequest: SignedContent[SubmissionRequest], - st: EphemeralState, + st: BlockUpdateEphemeralState, sequencingSnapshot: SyncCryptoApi, signingSnapshotO: Option[SyncCryptoApi], latestSequencerEventTimestamp: Option[CantonTimestamp], )(implicit traceContext: TraceContext, executionContext: ExecutionContext, - ): FutureUnlessShutdown[(EphemeralState, SubmissionRequestOutcome)] = { + ): FutureUnlessShutdown[ + (BlockUpdateEphemeralState, SubmissionRequestOutcome, Option[CantonTimestamp]) + ] = { val submissionRequest = signedSubmissionRequest.content // In the following EitherT, Lefts are used to stop processing the submission request and immediately produce the sequenced events @@ -756,10 +747,10 @@ class BlockUpdateGenerator( _ <- EitherT.cond[FutureUnlessShutdown]( sequencingTimestamp <= submissionRequest.maxSequencingTime, (), - // the sequencer is beyond the timestamp allowed for sequencing this request so it is silently dropped - // a correct sender should be monitoring their sequenced events and noticed that the max-sequencing-time has been - // exceeded and trigger a timeout - // we don't log this as a warning as it is expected behaviour. within a distributed network, the source of + // The sequencer is beyond the timestamp allowed for sequencing this request so it is silently dropped. + // A correct sender should be monitoring their sequenced events and notice that the max-sequencing-time has been + // exceeded and trigger a timeout. + // We don't log this as a warning as it is expected behaviour. Within a distributed network, the source of // a delay can come from different nodes and we should only log this as a warning in a way where we can // attribute the delay to a specific node. { @@ -792,7 +783,7 @@ class BlockUpdateGenerator( sequencingTimestamp, submissionRequest, latestSequencerEventTimestamp, - st.heads.get(sequencerId), + st.headCounter(sequencerId), st.tryNextCounter, ) topologySnapshot = signingSnapshotO.getOrElse(sequencingSnapshot) @@ -815,17 +806,15 @@ class BlockUpdateGenerator( topologySnapshot, st, ) - stateAfterTrafficConsume <- EitherT.liftF { - updateRateLimiting( - submissionRequest, - sequencingTimestamp, - sequencingSnapshot, - groupToMembers, - st, - latestSequencerEventTimestamp, - warnIfApproximate = st.headCounterAboveGenesis(sequencerId), - ) - } + stateAfterTrafficConsume <- updateRateLimiting( + submissionRequest, + sequencingTimestamp, + sequencingSnapshot, + groupToMembers, + st, + latestSequencerEventTimestamp, + warnIfApproximate = st.headCounterAboveGenesis(sequencerId), + ) _ <- EitherT.cond[FutureUnlessShutdown]( SequencerValidations.checkFromParticipantToAtMostOneMediator(submissionRequest), (), { @@ -889,14 +878,46 @@ class BlockUpdateGenerator( case (aggregationId, inFlightAggregationUpdate, _) => aggregationId -> inFlightAggregationUpdate } - stateAfterTrafficConsume -> SubmissionRequestOutcome( - events, - aggregationUpdate, - signingSnapshotO, + + // We need to know whether the group of sequencers was addressed in order to update `latestSequencerEventTimestamp`. 
+ // Simply checking whether this sequencer is within the resulting event recipients opens up + // the door for a malicious participant to target a single sequencer, which would result in the + // various sequencers reaching a different value. + // + // Currently, the only use cases of addressing a sequencer are: + // * via AllMembersOfDomain for topology transactions + // * via SequencersOfDomain for traffic control top-up messages + // + // Therefore, we check whether this sequencer was addressed via a group address to avoid the above + // case. + // + // NOTE: Pruning concerns + // ---------------------- + // `latestSequencerEventTimestamp` is relevant for pruning. + // For the traffic top-ups, we can use the block's last timestamp to signal "safe-to-prune", because + // the logic to compute the balance based on `latestSequencerEventTimestamp` sits inside the manager + // and we can make it work together with pruning. + // For topology, pruning is not yet implemented. However, the logic to compute snapshot timestamps + // sits outside of the topology processor and so from the topology processor's point of view, + // `latestSequencerEventTimestamp` should be part of a "safe-to-prune" timestamp calculation. + // + // See https://github.com/DACH-NY/canton/pull/17676#discussion_r1515926774 + val sequencerIsAddressed = + groupToMembers.contains(AllMembersOfDomain) || groupToMembers.contains(SequencersOfDomain) + val sequencerEventTimestampO = Option.when(sequencerIsAddressed)(sequencingTimestamp) + + ( + stateAfterTrafficConsume, + SubmissionRequestOutcome( + events, + aggregationUpdate, + signingSnapshotO, + ), + sequencerEventTimestampO, ) } resultET.value.map { - case Left(outcome) => st -> outcome + case Left(outcome) => (st, outcome, None) case Right(newStateAndOutcome) => newStateAndOutcome } } @@ -965,8 +986,8 @@ class BlockUpdateGenerator( ): EitherT[FutureUnlessShutdown, SubmissionRequestOutcome, Unit] = { val submissionRequest = signedSubmissionRequest.content - // if we haven't seen any topology transactions yet, then we cannot verify signatures, so we skip it. - // in practice this should only happen for the first ever transaction, which contains the initial topology data. + // If we haven't seen any topology transactions yet, then we cannot verify signatures, so we skip it. + // In practice this should only happen for the first ever transaction, which contains the initial topology data. val skipCheck = latestSequencerEventTimestamp.isEmpty || !submissionRequest.sender.isAuthenticated if (skipCheck) { @@ -1055,10 +1076,7 @@ class BlockUpdateGenerator( ) } - // Block sequencers keep track of the latest topology client timestamp - // only after protocol version 3 has been released, - // so silence the warning if we are running on a higher protocol version - // or have not delivered anything to the sequencer's topology client. + // Silence the warning if we have not delivered anything to the sequencer's topology client. 
val warnIfApproximate = sequencersSequencerCounter.exists(_ > SequencerCounter.Genesis) SequencedEventValidator .validateTopologyTimestampUS( @@ -1180,7 +1198,7 @@ class BlockUpdateGenerator( submissionRequest: SubmissionRequest, sequencingTimestamp: CantonTimestamp, topologySnapshot: SyncCryptoApi, - st: EphemeralState, + st: BlockUpdateEphemeralState, )(implicit traceContext: TraceContext, executionContext: ExecutionContext, @@ -1328,7 +1346,7 @@ class BlockUpdateGenerator( private def signEvents( events: NonEmpty[Map[Member, SequencedEvent[ClosedEnvelope]]], snapshot: SyncCryptoApi, - ephemeralState: EphemeralState, + ephemeralState: BlockUpdateEphemeralState, // sequencingTimestamp and sequencingSnapshot used for tombstones when snapshot does not include sequencer signing keys sequencingTimestamp: CantonTimestamp, sequencingSnapshot: SyncCryptoApi, @@ -1455,12 +1473,15 @@ class BlockUpdateGenerator( } private def updateTrafficStates( - ephemeralState: EphemeralState, + ephemeralState: BlockUpdateEphemeralState, members: Set[Member], sequencingTimestamp: CantonTimestamp, snapshot: SyncCryptoApi, latestTopologyTimestamp: Option[CantonTimestamp], - )(implicit ec: ExecutionContext, tc: TraceContext) = { + )(implicit + ec: ExecutionContext, + tc: TraceContext, + ): FutureUnlessShutdown[BlockUpdateEphemeralState] = { snapshot.ipsSnapshot .trafficControlParameters(protocolVersion) .flatMap { @@ -1491,44 +1512,25 @@ class BlockUpdateGenerator( sequencingTimestamp: CantonTimestamp, sequencingSnapshot: SyncCryptoApi, groupToMembers: Map[GroupRecipient, Set[Member]], - ephemeralState: EphemeralState, + ephemeralState: BlockUpdateEphemeralState, lastSeenTopologyTimestamp: Option[CantonTimestamp], warnIfApproximate: Boolean, )(implicit ec: ExecutionContext, tc: TraceContext, - ): FutureUnlessShutdown[EphemeralState] = { + ): EitherT[FutureUnlessShutdown, SubmissionRequestOutcome, BlockUpdateEphemeralState] = { val newStateOF = for { parameters <- OptionT( sequencingSnapshot.ipsSnapshot.trafficControlParameters(protocolVersion) ) sender = request.sender // Get the traffic from the ephemeral state - trafficState <- OptionT - .fromOption[FutureUnlessShutdown](ephemeralState.trafficState.get(sender)) - .orElse { - // If it's not there, see if the member is registered and if so create a new traffic state for it - val statusO = ephemeralState.status.members.find { status => - status.member == sender && status.enabled - } - OptionT( - statusO.traverse(status => - rateLimitManager.createNewTrafficStateAt( - status.member, - status.registeredAt.immediatePredecessor, - parameters, - ) - ) - ) - } - .thereafter { - case Success(UnlessShutdown.Outcome(None)) => - // If there's no trace of this member, log it and let the event through - logger.warn( - s"Sender $sender unknown by rate limiter. The message will still be delivered." - ) - case _ => - } + trafficState = ephemeralState.trafficState.getOrElse( + sender, + // If there's no trace of this member. But we have ensured that the sender is registered + // and all registered members should have a traffic state. 
+ ErrorUtil.invalidState(s"Sender $sender unknown by rate limiter."), + ) _ <- if (sequencingTimestamp <= trafficState.timestamp) { logger.warn( @@ -1550,42 +1552,62 @@ class BlockUpdateGenerator( lastBalanceUpdateTimestamp = lastSeenTopologyTimestamp, warnIfApproximate = warnIfApproximate, ) + .map(Right(_)) .valueOr { case error: SequencerRateLimitError.EventOutOfOrder => logger.warn( s"Consumed an event out of order for member ${error.member} with traffic state '$trafficState'. Current traffic state timestamp is ${error.currentTimestamp} but event timestamp is ${error.eventTimestamp}. The traffic state will not be updated." ) - trafficState + Right(trafficState) + case error: SequencerRateLimitError.AboveTrafficLimit + if parameters.enforceRateLimiting => + logger.info( + s"Submission from member ${error.member} with traffic state '${error.trafficState.toString}' was above traffic limit. Submission cost: ${error.trafficCost.value}. The message will not be delivered." + ) + Left( + SubmissionRequestOutcome.reject( + sender, + DeliverError.create( + ephemeralState.tryNextCounter(sender), + sequencingTimestamp, + domainId, + request.messageId, + SequencerErrors + .TrafficCredit( + s"Not enough traffic credit for sender $sender to send message with ID ${request.messageId}: $error" + ), + protocolVersion, + ), + ) + ) case error: SequencerRateLimitError.AboveTrafficLimit => logger.info( s"Submission from member ${error.member} with traffic state '${error.trafficState.toString}' was above traffic limit. Submission cost: ${error.trafficCost.value}. The message will still be delivered." ) - error.trafficState + Right(error.trafficState) case error: SequencerRateLimitError.UnknownBalance => logger.warn( s"Could not obtain valid balance at $sequencingTimestamp for member ${error.member} with traffic state '$trafficState'. The message will still be delivered but the traffic state has not been updated." 
) - trafficState + Right(trafficState) } ) - } yield updateTrafficState(ephemeralState, sender, newSenderTrafficState) - newStateOF.getOrElse(ephemeralState) + } yield newSenderTrafficState.map(updateTrafficState(ephemeralState, sender, _)) + + EitherT(newStateOF.getOrElse(Right(ephemeralState))) } private def updateTrafficState( - ephemeralState: EphemeralState, + ephemeralState: BlockUpdateEphemeralState, member: Member, trafficState: TrafficState, - ): EphemeralState = + ): BlockUpdateEphemeralState = ephemeralState .focus(_.trafficState) .modify(_.updated(member, trafficState)) } -object BlockUpdateGenerator { - - type SignedEvents = NonEmpty[Map[Member, OrdinarySerializedEvent]] - +object BlockUpdateGeneratorImpl { private type EventsForSubmissionRequest = Map[Member, SequencedEvent[ClosedEnvelope]] /** Describes the outcome of processing a submission request: diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala index d11ef126b..586ca9b46 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/LedgerBlockEvent.scala @@ -33,10 +33,8 @@ object LedgerBlockEvent extends HasLoggerName { signedSubmissionRequest: SignedContent[SubmissionRequest], ) extends LedgerBlockEvent final case class AddMember(member: Member) extends LedgerBlockEvent - final case class DisableMember(member: Member) extends LedgerBlockEvent final case class Acknowledgment(request: SignedContent[AcknowledgeRequest]) extends LedgerBlockEvent - final case class Prune(timestamp: CantonTimestamp) extends LedgerBlockEvent def fromRawBlockEvent( protocolVersion: ProtocolVersion diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockEphemeralState.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockEphemeralState.scala index 788356663..c52df44fa 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockEphemeralState.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockEphemeralState.scala @@ -4,15 +4,13 @@ package com.digitalasset.canton.domain.block.data import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.block.{BlockUpdates, UninitializedBlockHeight} +import com.digitalasset.canton.domain.block.UninitializedBlockHeight import com.digitalasset.canton.domain.sequencing.sequencer.{ SequencerInitialState, SequencerSnapshot, } import com.digitalasset.canton.domain.sequencing.traffic.TrafficBalance -import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggingContext} -import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.version.ProtocolVersion import com.google.protobuf.ByteString @@ -128,15 +126,3 @@ object BlockEphemeralState { ) } } - -/** Helper case class generated by the BlockUpdateGenerator after being given a block by a blockchain-based sequencer - * integration. 
- * - * @param height height of the block used for updating the ephemeral state - * @param updateGenerator closure of the block updates generated by processing a given block, for a certain ephemeral state - */ -final case class BlockUpdateClosureWithHeight( - height: Long, - updateGenerator: BlockEphemeralState => FutureUnlessShutdown[BlockUpdates], - blockTraceContext: TraceContext, -) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockUpdateEphemeralState.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockUpdateEphemeralState.scala new file mode 100644 index 000000000..585081e71 --- /dev/null +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/BlockUpdateEphemeralState.scala @@ -0,0 +1,59 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.domain.block.data + +import com.digitalasset.canton.SequencerCounter +import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.domain.sequencing.sequencer.store.CounterCheckpoint +import com.digitalasset.canton.domain.sequencing.sequencer.{ + InFlightAggregations, + InternalSequencerMemberStatus, +} +import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencing.protocol.TrafficState +import com.digitalasset.canton.topology.Member + +/** Subset of the [[EphemeralState]] that is used by the block processing stage + * of the [[com.digitalasset.canton.domain.block.BlockUpdateGenerator]] + */ +final case class BlockUpdateEphemeralState( + checkpoints: Map[Member, CounterCheckpoint], + inFlightAggregations: InFlightAggregations, + membersMap: Map[Member, InternalSequencerMemberStatus], + trafficState: Map[Member, TrafficState], +) extends PrettyPrinting { + + /** Return true if the head counter for the member is above the genesis counter. + * False otherwise + */ + def headCounterAboveGenesis(member: Member): Boolean = + checkpoints.get(member).exists(_.counter > SequencerCounter.Genesis) + + def registeredMembers: Set[Member] = membersMap.keySet + + def headCounter(member: Member): Option[SequencerCounter] = checkpoints.get(member).map(_.counter) + + /** Next counter value for a single member. + * Callers must check that the member has been previously registered otherwise a [[java.lang.IllegalArgumentException]] will be thrown. 
+ */ + def tryNextCounter(member: Member): SequencerCounter = { + require(registeredMembers contains member, s"Member ($member) must be registered") + + headCounter(member).fold(SequencerCounter.Genesis)(_ + 1) + } + + def evictExpiredInFlightAggregations(upToInclusive: CantonTimestamp): BlockUpdateEphemeralState = + this.copy( + inFlightAggregations = inFlightAggregations.filterNot { case (_, inFlightAggregation) => + inFlightAggregation.expired(upToInclusive) + } + ) + + override def pretty: Pretty[BlockUpdateEphemeralState] = prettyOfClass( + param("checkpoints", _.checkpoints), + param("in-flight aggregations", _.inFlightAggregations), + param("members map", _.membersMap), + param("traffic state", _.trafficState), + ) +} diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/EphemeralState.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/EphemeralState.scala index b10babc97..9f7122323 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/EphemeralState.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/EphemeralState.scala @@ -21,7 +21,7 @@ import com.digitalasset.canton.version.ProtocolVersion /** State held in memory by [[com.digitalasset.canton.domain.block.BlockSequencerStateManager]] to keep track of: * - * @param heads The latest counter value for members who have previously received an event + * @param checkpoints The latest counter value for members who have previously received an event * (registered members who have not yet received an event will not have a value) * @param inFlightAggregations All aggregatable submission requests by their [[com.digitalasset.canton.sequencing.protocol.AggregationId]] * whose [[com.digitalasset.canton.domain.sequencing.sequencer.InFlightAggregation.maxSequencingTimestamp]] has not yet elapsed. @@ -34,14 +34,19 @@ final case class EphemeralState( checkpoints: Map[Member, CounterCheckpoint], trafficState: Map[Member, TrafficState], ) extends PrettyPrinting { - val registeredMembers: Set[Member] = status.members.map(_.member).toSet - val heads: Map[Member, SequencerCounter] = checkpoints.fmap(_.counter) + def registeredMembers: Set[Member] = status.membersMap.keySet + def heads: Map[Member, SequencerCounter] = checkpoints.fmap(_.counter) + + locally { + val registered = registeredMembers + val unregisteredMembersWithCounters = checkpoints.keys.filterNot(registered.contains) + require( + unregisteredMembersWithCounters.isEmpty, + s"All members with a head counter value must be registered. " + + s"Members ${unregisteredMembersWithCounters.toList} have counters but are not registered.", + ) + } - /** Return true if the head counter for the member is above the genesis counter. - * False otherwise - */ - def headCounterAboveGenesis(member: Member): Boolean = - heads.get(member).exists(_ > SequencerCounter.Genesis) def toSequencerSnapshot( lastTs: CantonTimestamp, additional: Option[SequencerSnapshot.ImplementationSpecificInfo], @@ -64,29 +69,6 @@ final case class EphemeralState( trafficBalances, ) - assert( - heads.keys.forall(registeredMembers.contains), - s"All members with a head counter value must be registered. " + - s"Members ${heads.toList.filterNot(h => registeredMembers.contains(h._1))} have head counters but are not registered.", - ) - - /** Next counter value for a single member. 
- * Callers must check that the member has been previously registered otherwise a [[java.lang.IllegalArgumentException]] will be thrown. - */ - def tryNextCounter(member: Member): SequencerCounter = { - require(registeredMembers contains member, s"Member ($member) must be registered") - - heads.get(member).fold(SequencerCounter.Genesis)(_ + 1) - } - - /** Generate the next counter value for the provided set of members. - * Callers must check that all members have been registered otherwise a [[java.lang.IllegalArgumentException]] will be thrown. - */ - def tryNextCounters(members: Set[Member]): Map[Member, SequencerCounter] = - members.map { member => - (member, tryNextCounter(member)) - }.toMap - def evictExpiredInFlightAggregations(upToInclusive: CantonTimestamp): EphemeralState = this.copy( inFlightAggregations = inFlightAggregations.filterNot { case (_, inFlightAggregation) => @@ -94,22 +76,42 @@ final case class EphemeralState( } ) + def toBlockUpdateEphemeralState: BlockUpdateEphemeralState = BlockUpdateEphemeralState( + checkpoints = checkpoints, + inFlightAggregations = inFlightAggregations, + membersMap = status.membersMap, + trafficState = trafficState, + ) + + def mergeBlockUpdateEphemeralState(other: BlockUpdateEphemeralState): EphemeralState = + EphemeralState( + checkpoints = other.checkpoints, + inFlightAggregations = other.inFlightAggregations, + status = this.status.copy(membersMap = other.membersMap), + trafficState = other.trafficState, + ) + + def headCounter(member: Member): Option[SequencerCounter] = checkpoints.get(member).map(_.counter) + override def pretty: Pretty[EphemeralState] = prettyOfClass( - param("heads", _.heads), + param("checkpoints", _.checkpoints), param("in-flight aggregations", _.inFlightAggregations), param("status", _.status), + param("traffic state", _.trafficState), ) } object EphemeralState { - val empty: EphemeralState = EphemeralState(Map.empty, Map.empty) - def counterToCheckpoint(counter: SequencerCounter) = + val empty: EphemeralState = + EphemeralState(Map.empty, Map.empty, InternalSequencerPruningStatus.Unimplemented) + + def counterToCheckpoint(counter: SequencerCounter): CounterCheckpoint = CounterCheckpoint(counter, CantonTimestamp.MinValue, None) def apply( heads: Map[Member, SequencerCounter], inFlightAggregations: InFlightAggregations, - status: InternalSequencerPruningStatus = InternalSequencerPruningStatus.Unimplemented, + status: InternalSequencerPruningStatus, trafficState: Map[Member, TrafficState] = Map.empty, ): EphemeralState = EphemeralState( diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/SequencerBlockStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/SequencerBlockStore.scala index 1570c3e00..953c0988e 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/SequencerBlockStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/SequencerBlockStore.scala @@ -27,7 +27,12 @@ import com.digitalasset.canton.domain.sequencing.sequencer.{ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} import com.digitalasset.canton.sequencing.OrdinarySerializedEvent -import com.digitalasset.canton.sequencing.protocol.TrafficState +import com.digitalasset.canton.sequencing.protocol.{ + AllMembersOfDomain, + Deliver, + SequencersOfDomain, + TrafficState, +} import com.digitalasset.canton.topology.Member 
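// Illustrative sketch (not part of the patch): the intended round trip between the
// EphemeralState and the narrower BlockUpdateEphemeralState introduced above. The block
// processing stage works on BlockUpdateEphemeralState only; its result is merged back,
// keeping the pre-block pruning status except for the members map.
// `processBlock` is a hypothetical stand-in for the BlockUpdateGenerator stage.
def applyBlockSketch(
    before: EphemeralState,
    processBlock: BlockUpdateEphemeralState => BlockUpdateEphemeralState,
): EphemeralState =
  before.mergeBlockUpdateEphemeralState(processBlock(before.toBlockUpdateEphemeralState))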
import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.ErrorUtil @@ -262,7 +267,18 @@ trait SequencerBlockStore extends AutoCloseable { ) } - val topologyEventsInBlock = allEventsInBlock.get(topologyClientMember) + // Keep only the events addressed to the sequencer that advance `latestSequencerEventTimestamp`. + // TODO(i17741): write a security test that checks proper behavior when a sequencer is targeted directly + val topologyEventsInBlock = allEventsInBlock + .get(topologyClientMember) + .map(_.filter(_.signedEvent.content match { + case Deliver(_, _, _, _, batch, _) => + val recipients = batch.allRecipients + recipients.contains(SequencersOfDomain) || recipients.contains(AllMembersOfDomain) + case _ => false + })) + .flatMap(NonEmpty.from) + topologyEventsInBlock match { case None => // For the initial state, the latest topology client timestamp @@ -275,6 +291,7 @@ trait SequencerBlockStore extends AutoCloseable { } case Some(topologyEvents) => val lastEvent = topologyEvents.toNEF.maximumBy(_.timestamp) + ErrorUtil.requireState( currentBlock.latestSequencerEventTimestamp.contains(lastEvent.timestamp), s"The latest topology client timestamp for block ${currentBlock.height} is ${currentBlock.latestSequencerEventTimestamp}, but the last event in the block to $topologyClientMember is at ${lastEvent.timestamp}", diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala index 8811ee77a..499796672 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/db/DbSequencerBlockStore.scala @@ -24,6 +24,7 @@ import com.digitalasset.canton.domain.sequencing.integrations.state.statemanager MemberSignedEvents, MemberTimestamps, } +import com.digitalasset.canton.domain.sequencing.sequencer.store.CounterCheckpoint import com.digitalasset.canton.domain.sequencing.sequencer.{ InFlightAggregationUpdates, InternalSequencerPruningStatus, @@ -190,24 +191,25 @@ class DbSequencerBlockStore( initial: BlockEphemeralState, maybeOnboardingTopologyEffectiveTimestamp: Option[CantonTimestamp], )(implicit traceContext: TraceContext): Future[Unit] = { - val members = initial.state.status.members.toSeq + val members = initial.state.status.members val updateBlockHeight = updateBlockHeightDBIO(initial.latestBlock) val updateLowerBound = sequencerStore.saveLowerBoundDBIO( initial.state.status.lowerBound, maybeOnboardingTopologyEffectiveTimestamp, ) - val writeInitialCounters = initial.state.heads.toSeq.map { case (member, counter) => - initial.state.trafficState - .get(member) - .map { memberTrafficState => - upsertMemberInitialStateWithTraffic( - member, - counter, - memberTrafficState, - ) - } - .getOrElse(upsertMemberInitialState(member, counter)) + val writeInitialCounters = initial.state.checkpoints.toSeq.map { + case (member, CounterCheckpoint(counter, _, _)) => + initial.state.trafficState + .get(member) + .map { memberTrafficState => + upsertMemberInitialStateWithTraffic( + member, + counter, + memberTrafficState, + ) + } + .getOrElse(upsertMemberInitialState(member, counter)) } val addMembers = members.map(member => sequencerStore.addMemberDBIO(member.member, member.registeredAt)) diff --git 
a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/memory/InMemorySequencerBlockStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/memory/InMemorySequencerBlockStore.scala index 6e6983995..7a9ce0054 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/memory/InMemorySequencerBlockStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/block/data/memory/InMemorySequencerBlockStore.scala @@ -4,6 +4,7 @@ package com.digitalasset.canton.domain.block.data.memory import cats.data.EitherT +import cats.syntax.foldable.* import cats.syntax.functor.* import com.digitalasset.canton.SequencerCounter import com.digitalasset.canton.concurrent.DirectExecutionContext @@ -80,9 +81,8 @@ class InMemorySequencerBlockStore( _ <- Future.traverse(initial.state.status.members.sortBy(_.registeredAt))(m => for { _ <- sequencerStore.addMember(m.member, m.registeredAt) - _ <- m.lastAcknowledged match { - case Some(ts) => sequencerStore.acknowledge(m.member, ts) - case _ => Future.unit + _ <- m.lastAcknowledged.traverse_ { ts => + sequencerStore.acknowledge(m.member, ts) } _ <- if (m.enabled) Future.unit else sequencerStore.disableMember(m.member) } yield () @@ -150,6 +150,8 @@ class InMemorySequencerBlockStore( )(implicit traceContext: TraceContext): Source[OrdinarySerializedEvent, NotUsed] = sequencerStore.readRange(member, startInclusive, endExclusive) + // TODO(#17726) Andreas: Figure out whether we can pull the readAtBlockTimestamp out + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) override def readHead(implicit traceContext: TraceContext): Future[BlockEphemeralState] = blocking(blockToTimestampMap.synchronized { blockToTimestampMap.keys.maxOption match { @@ -252,7 +254,7 @@ class InMemorySequencerBlockStore( ): Future[Unit] = Future.successful(initialState.getAndUpdate { previousState => // Don't update member counter if specified counter is less than or equal that previous counter. - val prevCounter = previousState.state.heads.get(member) + val prevCounter = previousState.state.headCounter(member) if (prevCounter.exists(_ >= counterLastUnsupported)) previousState else previousState diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala index 75aa9cd9c..b2b33e7c2 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/config/DomainParametersConfig.scala @@ -13,7 +13,7 @@ import com.digitalasset.canton.crypto.CryptoFactory.{ } import com.digitalasset.canton.crypto.* import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} -import com.digitalasset.canton.protocol.{AcsCommitmentsCatchUpConfig, StaticDomainParameters} +import com.digitalasset.canton.protocol.StaticDomainParameters import com.digitalasset.canton.version.{DomainProtocolVersion, ProtocolVersion} /** Configuration of domain parameters that all members connecting to a domain must adhere to. @@ -29,7 +29,6 @@ import com.digitalasset.canton.version.{DomainProtocolVersion, ProtocolVersion} * @param requiredCryptoKeyFormats The optional required crypto key formats that a member has to support. If none is specified, all the supported algorithms are required. 
* @param protocolVersion The protocol version spoken on the domain. All participants and domain nodes attempting to connect to the sequencer need to support this protocol version to connect. * @param dontWarnOnDeprecatedPV If true, then this domain will not emit a warning when configured to use a deprecated protocol version (such as 2.0.0). - * @param acsCommitmentsCatchUp The optional catch up parameters of type [[com.digitalasset.canton.protocol.AcsCommitmentsCatchUpConfig]]. If None is specified, then the catch-up mode is disabled. */ final case class DomainParametersConfig( requiredSigningKeySchemes: Option[NonEmpty[Set[SigningKeyScheme]]] = None, @@ -40,7 +39,6 @@ final case class DomainParametersConfig( protocolVersion: DomainProtocolVersion = DomainProtocolVersion( ProtocolVersion.latest ), - acsCommitmentsCatchUp: Option[AcsCommitmentsCatchUpConfig] = None, // TODO(i15561): Revert back to `false` once there is a stable Daml 3 protocol version override val devVersionSupport: Boolean = true, override val dontWarnOnDeprecatedPV: Boolean = false, @@ -56,7 +54,6 @@ final case class DomainParametersConfig( param("protocolVersion", _.protocolVersion.version), param("devVersionSupport", _.devVersionSupport), param("dontWarnOnDeprecatedPV", _.dontWarnOnDeprecatedPV), - param("acsCommitmentsCatchUp", _.acsCommitmentsCatchUp), ) override def initialProtocolVersion: ProtocolVersion = protocolVersion.version @@ -113,7 +110,6 @@ final case class DomainParametersConfig( requiredHashAlgorithms = newRequiredHashAlgorithms, requiredCryptoKeyFormats = newCryptoKeyFormats, protocolVersion = protocolVersion.unwrap, - acsCommitmentsCatchUp = acsCommitmentsCatchUp, ) } } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala index 4cce6c5cc..c234f4f5a 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/Mediator.scala @@ -68,8 +68,8 @@ private[mediator] class Mediator( val topologyClient: DomainTopologyClientWithInit, private[canton] val syncCrypto: DomainSyncCryptoClient, topologyTransactionProcessor: TopologyTransactionProcessorCommon, - val topologyManagerStatusO: Option[TopologyManagerStatus], - val domainOutboxStatusO: Option[DomainOutboxStatus], + val topologyManagerStatus: TopologyManagerStatus, + val domainOutboxStatus: DomainOutboxStatus, timeTrackerConfig: DomainTimeTrackerConfig, state: MediatorState, private[canton] val sequencerCounterTrackerStore: SequencerCounterTrackerStore, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala index 79e563300..97dfb16d7 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNode.scala @@ -3,18 +3,73 @@ package com.digitalasset.canton.domain.mediator +import cats.Monad +import cats.data.EitherT +import cats.instances.future.* +import cats.syntax.either.* +import com.daml.grpc.adapter.ExecutionSequencerFactory +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.DomainAlias +import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader +import 
com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService import com.digitalasset.canton.config.* +import com.digitalasset.canton.crypto.{ + Crypto, + CryptoHandshakeValidator, + DomainSyncCryptoClient, + Fingerprint, +} +import com.digitalasset.canton.domain.Domain +import com.digitalasset.canton.domain.mediator.admin.gprc.{ + InitializeMediatorRequestX, + InitializeMediatorResponseX, +} +import com.digitalasset.canton.domain.mediator.service.GrpcMediatorInitializationServiceX +import com.digitalasset.canton.domain.mediator.store.{ + MediatorDomainConfiguration, + MediatorDomainConfigurationStore, +} +import com.digitalasset.canton.domain.metrics.MediatorMetrics +import com.digitalasset.canton.domain.service.GrpcSequencerConnectionService import com.digitalasset.canton.environment.* -import com.digitalasset.canton.health.ComponentStatus import com.digitalasset.canton.health.admin.data.MediatorNodeStatus -import com.digitalasset.canton.lifecycle.Lifecycle +import com.digitalasset.canton.health.{ + ComponentStatus, + GrpcHealthReporter, + HealthService, + MutableHealthComponent, +} +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasCloseContext, Lifecycle} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.mediator.admin.v30.MediatorInitializationServiceGrpc import com.digitalasset.canton.resource.Storage -import com.digitalasset.canton.sequencing.client.SequencerClientConfig +import com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.sequencing.client.{ + RequestSigner, + SequencerClient, + SequencerClientConfig, + SequencerClientFactory, +} +import com.digitalasset.canton.store.db.SequencerClientDiscriminator +import com.digitalasset.canton.store.{ + IndexedStringStore, + SendTrackerStore, + SequencedEventStore, + SequencerCounterTrackerStore, +} import com.digitalasset.canton.time.{Clock, HasUptime} -import com.digitalasset.canton.topology.{DomainId, MediatorId} -import com.digitalasset.canton.version.ProtocolVersion +import com.digitalasset.canton.topology.* +import com.digitalasset.canton.topology.processing.TopologyTransactionProcessorX +import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore +import com.digitalasset.canton.topology.store.TopologyStoreX +import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.{ResourceUtil, SingleUseCell} +import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionCompatibility} +import monocle.Lens +import monocle.macros.syntax.lens.* +import org.apache.pekko.actor.ActorSystem +import java.util.concurrent.ScheduledExecutorService import scala.concurrent.Future abstract class MediatorNodeConfigCommon( @@ -64,15 +119,536 @@ final case class RemoteMediatorConfig( override def clientAdminApi: ClientConfig = adminApi } -// TODO(#15161): Fold MediatorNodeCommon into MediatorNodeX -class MediatorNodeCommon( +/** Community Mediator Node configuration that defaults to auto-init + */ +final case class CommunityMediatorNodeXConfig( + override val adminApi: CommunityAdminServerConfig = CommunityAdminServerConfig(), + override val storage: CommunityStorageConfig = CommunityStorageConfig.Memory(), + override val crypto: CommunityCryptoConfig = CommunityCryptoConfig(), + override val init: InitConfig = InitConfig(identity = Some(InitConfigBase.Identity())), + override val timeTracker: DomainTimeTrackerConfig = DomainTimeTrackerConfig(), + override val sequencerClient: 
SequencerClientConfig = SequencerClientConfig(), + override val caching: CachingConfigs = CachingConfigs(), + override val parameters: MediatorNodeParameterConfig = MediatorNodeParameterConfig(), + override val monitoring: NodeMonitoringConfig = NodeMonitoringConfig(), + override val topology: TopologyConfig = TopologyConfig(), +) extends MediatorNodeConfigCommon( + adminApi, + storage, + crypto, + init, + timeTracker, + sequencerClient, + caching, + parameters, + monitoring, + ) + with ConfigDefaults[DefaultPorts, CommunityMediatorNodeXConfig] { + + override val nodeTypeName: String = "mediator" + + override def replicationEnabled: Boolean = false + + override def withDefaults(ports: DefaultPorts): CommunityMediatorNodeXConfig = { + this + .focus(_.adminApi.internalPort) + .modify(ports.mediatorAdminApiPort.setDefaultPort) + } +} + +class MediatorNodeBootstrapX( + arguments: CantonNodeBootstrapCommonArguments[ + MediatorNodeConfigCommon, + MediatorNodeParameters, + MediatorMetrics, + ], + protected val replicaManager: MediatorReplicaManager, + mediatorRuntimeFactory: MediatorRuntimeFactory, +)( + implicit executionContext: ExecutionContextIdlenessExecutorService, + implicit val executionSequencerFactory: ExecutionSequencerFactory, + scheduler: ScheduledExecutorService, + actorSystem: ActorSystem, +) extends CantonNodeBootstrapX[ + MediatorNode, + MediatorNodeConfigCommon, + MediatorNodeParameters, + MediatorMetrics, + ](arguments) { + + override protected def member(uid: UniqueIdentifier): Member = MediatorId(uid) + + private val domainTopologyManager = new SingleUseCell[DomainTopologyManagerX]() + + override protected def sequencedTopologyStores: Seq[TopologyStoreX[DomainStore]] = + domainTopologyManager.get.map(_.store).toList + + override protected def sequencedTopologyManagers: Seq[DomainTopologyManagerX] = + domainTopologyManager.get.toList + + private lazy val deferredSequencerClientHealth = + MutableHealthComponent(loggerFactory, SequencerClient.healthName, timeouts) + override protected def mkNodeHealthService(storage: Storage): HealthService = + HealthService( + "mediator", + logger, + timeouts, + Seq(storage), + softDependencies = Seq(deferredSequencerClientHealth), + ) + private class WaitForMediatorToDomainInit( + storage: Storage, + crypto: Crypto, + mediatorId: MediatorId, + authorizedTopologyManager: AuthorizedTopologyManagerX, + healthService: HealthService, + ) extends BootstrapStageWithStorage[MediatorNode, StartupNode, DomainId]( + "wait-for-mediator-to-domain-init", + bootstrapStageCallback, + storage, + config.init.autoInit, + ) + with GrpcMediatorInitializationServiceX.Callback { + + adminServerRegistry + .addServiceU( + MediatorInitializationServiceGrpc + .bindService( + new GrpcMediatorInitializationServiceX(this, loggerFactory), + executionContext, + ), + true, + ) + + protected val domainConfigurationStore = + MediatorDomainConfigurationStore(storage, timeouts, loggerFactory) + addCloseable(domainConfigurationStore) + addCloseable(deferredSequencerClientHealth) + + override protected def stageCompleted(implicit + traceContext: TraceContext + ): Future[Option[DomainId]] = domainConfigurationStore.fetchConfiguration.toOption.mapFilter { + case Some(res) => Some(res.domainId) + case None => None + }.value + + override protected def buildNextStage(domainId: DomainId): StartupNode = { + val domainTopologyStore = + TopologyStoreX(DomainStore(domainId), storage, timeouts, loggerFactory) + addCloseable(domainTopologyStore) + + val outboxQueue = new 
DomainOutboxQueue(loggerFactory) + val topologyManager = new DomainTopologyManagerX( + clock = clock, + crypto = crypto, + store = domainTopologyStore, + outboxQueue = outboxQueue, + enableTopologyTransactionValidation = config.topology.enableTopologyTransactionValidation, + timeouts = timeouts, + futureSupervisor = futureSupervisor, + loggerFactory = loggerFactory, + ) + + if (domainTopologyManager.putIfAbsent(topologyManager).nonEmpty) { + // TODO(#14048) how to handle this error properly? + throw new IllegalStateException("domainTopologyManager shouldn't have been set before") + } + + new StartupNode( + storage, + crypto, + mediatorId, + authorizedTopologyManager, + topologyManager, + domainId, + domainConfigurationStore, + domainTopologyStore, + healthService, + ) + } + + override protected def autoCompleteStage() + : EitherT[FutureUnlessShutdown, String, Option[DomainId]] = + EitherT.rightT(None) // this stage doesn't have auto-init + + override def initialize(request: InitializeMediatorRequestX)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, InitializeMediatorResponseX] = { + if (isInitialized) { + logger.info( + "Received a request to initialize an already initialized mediator. Skipping initialization!" + ) + EitherT.pure(InitializeMediatorResponseX()) + } else { + val configToStore = MediatorDomainConfiguration( + Fingerprint.tryCreate( + "unused" + ), // x-nodes do not need to return the initial key + request.domainId, + request.domainParameters, + request.sequencerConnections, + ) + val sequencerInfoLoader = createSequencerInfoLoader(configToStore) + lazy val validatedET = sequencerInfoLoader.validateSequencerConnection( + DomainAlias.tryCreate("domain"), + Some(request.domainId), + configToStore.sequencerConnections, + request.sequencerConnectionValidation, + ) + completeWithExternal { + logger.info( + s"Assigning mediator to ${request.domainId} via sequencers ${request.sequencerConnections}" + ) + for { + _ <- validatedET.leftMap { errors => + s"Invalid sequencer connections provided for initialisation: $errors" + } + _ <- + domainConfigurationStore + .saveConfiguration(configToStore) + .leftMap(_.toString) + } yield request.domainId + }.map(_ => InitializeMediatorResponseX()) + } + } + + } + + private class StartupNode( + storage: Storage, + crypto: Crypto, + mediatorId: MediatorId, + authorizedTopologyManager: AuthorizedTopologyManagerX, + domainTopologyManager: DomainTopologyManagerX, + domainId: DomainId, + domainConfigurationStore: MediatorDomainConfigurationStore, + domainTopologyStore: TopologyStoreX[DomainStore], + healthService: HealthService, + ) extends BootstrapStage[MediatorNode, RunningNode[MediatorNode]]( + description = "Startup mediator node", + bootstrapStageCallback, + ) + with HasCloseContext { + + private val domainLoggerFactory = loggerFactory.append("domainId", domainId.toString) + + override protected def attempt()(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, String, Option[RunningNode[MediatorNode]]] = { + + val domainOutboxFactory = new DomainOutboxXFactory( + domainId = domainId, + memberId = mediatorId, + authorizedTopologyManager = authorizedTopologyManager, + domainTopologyManager = domainTopologyManager, + crypto = crypto, + topologyXConfig = config.topology, + timeouts = timeouts, + loggerFactory = domainLoggerFactory, + futureSupervisor = arguments.futureSupervisor, + ) + performUnlessClosingEitherU("starting up mediator node") { + val indexedStringStore = IndexedStringStore.create( + 
storage, + parameterConfig.cachingConfigs.indexedStrings, + timeouts, + domainLoggerFactory, + ) + addCloseable(indexedStringStore) + for { + domainId <- initializeNodePrerequisites( + storage, + crypto, + mediatorId, + () => domainConfigurationStore.fetchConfiguration.leftMap(_.toString), + domainConfigurationStore.saveConfiguration(_).leftMap(_.toString), + indexedStringStore, + domainTopologyStore, + TopologyManagerStatus.combined(authorizedTopologyManager, domainTopologyManager), + domainTopologyStateInit = + new StoreBasedDomainTopologyInitializationCallback(mediatorId, domainTopologyStore), + domainOutboxFactory = domainOutboxFactory, + ) + } yield { + val node = new MediatorNode( + arguments.config, + mediatorId, + domainId, + replicaManager, + storage, + clock, + domainLoggerFactory, + healthData = healthService.dependencies.map(_.toComponentStatus), + ) + addCloseable(node) + Some(new RunningNode(bootstrapStageCallback, node)) + } + } + } + } + + private def createSequencerInfoLoader(config: MediatorDomainConfiguration) = + new SequencerInfoLoader( + timeouts = timeouts, + traceContextPropagation = parameters.tracing.propagation, + clientProtocolVersions = NonEmpty.mk(Seq, config.domainParameters.protocolVersion), + minimumProtocolVersion = Some(config.domainParameters.protocolVersion), + dontWarnOnDeprecatedPV = parameterConfig.dontWarnOnDeprecatedPV, + loggerFactory = loggerFactory, + ) + + protected def initializeNodePrerequisites( + storage: Storage, + crypto: Crypto, + mediatorId: MediatorId, + fetchConfig: () => EitherT[Future, String, Option[MediatorDomainConfiguration]], + saveConfig: MediatorDomainConfiguration => EitherT[Future, String, Unit], + indexedStringStore: IndexedStringStore, + domainTopologyStore: TopologyStoreX[DomainStore], + topologyManagerStatus: TopologyManagerStatus, + domainTopologyStateInit: DomainTopologyInitializationCallback, + domainOutboxFactory: DomainOutboxXFactory, + ): EitherT[Future, String, DomainId] = + for { + domainConfig <- fetchConfig() + .leftMap(err => s"Failed to fetch domain configuration: $err") + .flatMap { x => + EitherT.fromEither( + x.toRight( + s"Mediator domain config has not been set. Must first be initialized by the domain in order to start." 
+ ) + ) + } + + _ <- EitherT.right[String]( + replicaManager.setup( + adminServerRegistry, + () => + mkMediatorRuntime( + mediatorId, + domainConfig, + indexedStringStore, + fetchConfig, + saveConfig, + storage, + crypto, + domainTopologyStore, + topologyManagerStatus, + domainTopologyStateInit, + domainOutboxFactory, + ), + storage.isActive, + ) + ) + } yield domainConfig.domainId + + private def mkMediatorRuntime( + mediatorId: MediatorId, + domainConfig: MediatorDomainConfiguration, + indexedStringStore: IndexedStringStore, + fetchConfig: () => EitherT[Future, String, Option[MediatorDomainConfiguration]], + saveConfig: MediatorDomainConfiguration => EitherT[Future, String, Unit], + storage: Storage, + crypto: Crypto, + domainTopologyStore: TopologyStoreX[DomainStore], + topologyManagerStatus: TopologyManagerStatus, + domainTopologyStateInit: DomainTopologyInitializationCallback, + domainOutboxFactory: DomainOutboxXFactory, + ): EitherT[Future, String, MediatorRuntime] = { + val domainId = domainConfig.domainId + val domainLoggerFactory = loggerFactory.append("domainId", domainId.toString) + val domainAlias = DomainAlias(domainConfig.domainId.uid.toLengthLimitedString) + val sequencerInfoLoader = createSequencerInfoLoader(domainConfig) + def getSequencerConnectionFromStore = fetchConfig() + .map(_.map(_.sequencerConnections)) + + for { + _ <- CryptoHandshakeValidator + .validate(domainConfig.domainParameters, config.crypto) + .toEitherT + sequencedEventStore = SequencedEventStore( + storage, + SequencerClientDiscriminator.UniqueDiscriminator, + domainConfig.domainParameters.protocolVersion, + timeouts, + domainLoggerFactory, + ) + sendTrackerStore = SendTrackerStore(storage) + sequencerCounterTrackerStore = SequencerCounterTrackerStore( + storage, + SequencerClientDiscriminator.UniqueDiscriminator, + timeouts, + domainLoggerFactory, + ) + topologyProcessorAndClient <- + EitherT.right( + TopologyTransactionProcessorX.createProcessorAndClientForDomain( + domainTopologyStore, + domainId, + domainConfig.domainParameters.protocolVersion, + crypto.pureCrypto, + arguments.parameterConfig, + config.topology.enableTopologyTransactionValidation, + arguments.clock, + arguments.futureSupervisor, + domainLoggerFactory, + ) + ) + (topologyProcessor, topologyClient) = topologyProcessorAndClient + _ = ips.add(topologyClient) + syncCryptoApi = new DomainSyncCryptoClient( + mediatorId, + domainId, + topologyClient, + crypto, + parameters.cachingConfigs, + timeouts, + futureSupervisor, + domainLoggerFactory, + ) + sequencerClientFactory = SequencerClientFactory( + domainId, + syncCryptoApi, + crypto, + parameters.sequencerClient, + parameters.tracing.propagation, + arguments.testingConfig, + domainConfig.domainParameters, + timeouts, + clock, + topologyClient, + futureSupervisor, + member => + Domain.recordSequencerInteractions + .get() + .lift(member) + .map(Domain.setMemberRecordingPath(member)), + member => + Domain.replaySequencerConfig.get().lift(member).map(Domain.defaultReplayPath(member)), + arguments.metrics.sequencerClient, + parameters.loggingConfig, + domainLoggerFactory, + ProtocolVersionCompatibility.trySupportedProtocolsDomain(parameters), + None, + ) + sequencerClientRef = + GrpcSequencerConnectionService.setup[MediatorDomainConfiguration](mediatorId)( + adminServerRegistry, + fetchConfig, + saveConfig, + Lens[MediatorDomainConfiguration, SequencerConnections](_.sequencerConnections)( + connection => conf => conf.copy(sequencerConnections = connection) + ), + RequestSigner(syncCryptoApi, 
domainConfig.domainParameters.protocolVersion), + sequencerClientFactory, + sequencerInfoLoader, + domainAlias, + ) + // we wait here until the sequencer becomes active. this allows to reconfigure the + // sequencer client address + info <- GrpcSequencerConnectionService.waitUntilSequencerConnectionIsValid( + sequencerInfoLoader, + this, + futureSupervisor, + getSequencerConnectionFromStore, + ) + + requestSigner = RequestSigner(syncCryptoApi, domainConfig.domainParameters.protocolVersion) + _ <- { + val headSnapshot = topologyClient.headSnapshot + for { + // TODO(i12076): Request topology information from all sequencers and reconcile + isMediatorActive <- EitherT.right[String](headSnapshot.isMediatorActive(mediatorId)) + _ <- Monad[EitherT[Future, String, *]].whenA(!isMediatorActive)( + sequencerClientFactory + .makeTransport( + info.sequencerConnections.default, + mediatorId, + requestSigner, + allowReplay = false, + ) + .flatMap( + ResourceUtil.withResourceEitherT(_)( + domainTopologyStateInit + .callback(topologyClient, _, domainConfig.domainParameters.protocolVersion) + ) + ) + ) + } yield {} + } + + sequencerClient <- sequencerClientFactory.create( + mediatorId, + sequencedEventStore, + sendTrackerStore, + requestSigner, + info.sequencerConnections, + info.expectedSequencers, + ) + + _ = sequencerClientRef.set(sequencerClient) + _ = deferredSequencerClientHealth.set(sequencerClient.healthComponent) + + // can just new up the enterprise mediator factory here as the mediator node is only available in enterprise setups + mediatorRuntime <- mediatorRuntimeFactory.create( + mediatorId, + domainId, + storage, + sequencerCounterTrackerStore, + sequencedEventStore, + sequencerClient, + syncCryptoApi, + topologyClient, + topologyProcessor, + topologyManagerStatus, + domainOutboxFactory, + config.timeTracker, + parameters, + domainConfig.domainParameters.protocolVersion, + clock, + arguments.metrics, + futureSupervisor, + domainLoggerFactory, + ) + _ <- mediatorRuntime.start() + } yield mediatorRuntime + } + + override protected def customNodeStages( + storage: Storage, + crypto: Crypto, + nodeId: UniqueIdentifier, + authorizedTopologyManager: AuthorizedTopologyManagerX, + healthServer: GrpcHealthReporter, + healthService: HealthService, + ): BootstrapStageOrLeaf[MediatorNode] = { + new WaitForMediatorToDomainInit( + storage, + crypto, + MediatorId(nodeId), + authorizedTopologyManager, + healthService, + ) + } + + override protected def onClosed(): Unit = { + super.onClosed() + } + +} + +object MediatorNodeBootstrapX { + val LoggerFactoryKeyName: String = "mediator" +} + +class MediatorNode( config: MediatorNodeConfigCommon, mediatorId: MediatorId, domainId: DomainId, - replicaManager: MediatorReplicaManager, + protected[canton] val replicaManager: MediatorReplicaManager, storage: Storage, - override protected val clock: Clock, - override protected val loggerFactory: NamedLoggerFactory, + override val clock: Clock, + override val loggerFactory: NamedLoggerFactory, healthData: => Seq[ComponentStatus], ) extends CantonNode with NamedLogging @@ -100,4 +676,5 @@ class MediatorNodeCommon( replicaManager, storage, )(logger) + } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeCommon.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeCommon.scala deleted file mode 100644 index d285b8d4c..000000000 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeCommon.scala +++ /dev/null 
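// Illustrative note (not part of the patch): the mediator health wiring above follows a
// deferred-binding pattern. `deferredSequencerClientHealth` is a MutableHealthComponent
// created before any sequencer client exists and registered as a soft dependency of the
// node's HealthService; once mkMediatorRuntime has built the client, the concrete
// component is bound via `deferredSequencerClientHealth.set(sequencerClient.healthComponent)`.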
@@ -1,303 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.domain.mediator - -import cats.Monad -import cats.data.EitherT -import cats.instances.future.* -import cats.syntax.either.* -import com.daml.grpc.adapter.ExecutionSequencerFactory -import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.DomainAlias -import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader -import com.digitalasset.canton.config.* -import com.digitalasset.canton.crypto.* -import com.digitalasset.canton.domain.Domain -import com.digitalasset.canton.domain.mediator.store.MediatorDomainConfiguration -import com.digitalasset.canton.domain.metrics.MediatorMetrics -import com.digitalasset.canton.domain.service.GrpcSequencerConnectionService -import com.digitalasset.canton.environment.* -import com.digitalasset.canton.health.{HealthService, MutableHealthComponent} -import com.digitalasset.canton.resource.Storage -import com.digitalasset.canton.sequencing.SequencerConnections -import com.digitalasset.canton.sequencing.client.{ - RequestSigner, - SequencerClient, - SequencerClientFactory, -} -import com.digitalasset.canton.store.db.SequencerClientDiscriminator -import com.digitalasset.canton.store.{ - IndexedStringStore, - SendTrackerStore, - SequencedEventStore, - SequencerCounterTrackerStore, -} -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit -import com.digitalasset.canton.topology.processing.TopologyTransactionProcessorCommon -import com.digitalasset.canton.util.ResourceUtil -import com.digitalasset.canton.version.{ProtocolVersion, ProtocolVersionCompatibility} -import monocle.Lens - -import scala.concurrent.Future - -trait MediatorNodeBootstrapCommon[ - T <: CantonNode, - NC <: LocalNodeConfig & MediatorNodeConfigCommon, -] { - - this: CantonNodeBootstrapCommon[T, NC, MediatorNodeParameters, MediatorMetrics] => - - type TopologyComponentFactory = (DomainId, ProtocolVersion) => EitherT[ - Future, - String, - (TopologyTransactionProcessorCommon, DomainTopologyClientWithInit), - ] - - protected val replicaManager: MediatorReplicaManager - - protected def mediatorRuntimeFactory: MediatorRuntimeFactory - - protected lazy val deferredSequencerClientHealth = - MutableHealthComponent(loggerFactory, SequencerClient.healthName, timeouts) - - protected implicit def executionSequencerFactory: ExecutionSequencerFactory - - override protected def mkNodeHealthService(storage: Storage): HealthService = - HealthService( - "mediator", - logger, - timeouts, - Seq(storage), - softDependencies = Seq(deferredSequencerClientHealth), - ) - - /** Attempt to start the node with this identity. 
*/ - protected def initializeNodePrerequisites( - storage: Storage, - crypto: Crypto, - mediatorId: MediatorId, - fetchConfig: () => EitherT[Future, String, Option[MediatorDomainConfiguration]], - saveConfig: MediatorDomainConfiguration => EitherT[Future, String, Unit], - indexedStringStore: IndexedStringStore, - topologyComponentFactory: TopologyComponentFactory, - topologyManagerStatusO: Option[TopologyManagerStatus], - maybeDomainTopologyStateInit: Option[DomainTopologyInitializationCallback], - maybeDomainOutboxFactory: Option[DomainOutboxXFactory], - ): EitherT[Future, String, DomainId] = - for { - domainConfig <- fetchConfig() - .leftMap(err => s"Failed to fetch domain configuration: $err") - .flatMap { x => - EitherT.fromEither( - x.toRight( - s"Mediator domain config has not been set. Must first be initialized by the domain in order to start." - ) - ) - } - - sequencerInfoLoader = new SequencerInfoLoader( - timeouts = timeouts, - traceContextPropagation = parameters.tracing.propagation, - clientProtocolVersions = NonEmpty.mk(Seq, domainConfig.domainParameters.protocolVersion), - minimumProtocolVersion = Some(domainConfig.domainParameters.protocolVersion), - dontWarnOnDeprecatedPV = parameterConfig.dontWarnOnDeprecatedPV, - loggerFactory = loggerFactory.append("domainId", domainConfig.domainId.toString), - ) - - _ <- EitherT.right[String]( - replicaManager.setup( - adminServerRegistry, - () => - mkMediatorRuntime( - mediatorId, - domainConfig, - indexedStringStore, - fetchConfig, - saveConfig, - storage, - crypto, - topologyComponentFactory, - topologyManagerStatusO, - maybeDomainTopologyStateInit, - maybeDomainOutboxFactory, - sequencerInfoLoader, - ), - storage.isActive, - ) - ) - } yield domainConfig.domainId - - private def mkMediatorRuntime( - mediatorId: MediatorId, - domainConfig: MediatorDomainConfiguration, - indexedStringStore: IndexedStringStore, - fetchConfig: () => EitherT[Future, String, Option[MediatorDomainConfiguration]], - saveConfig: MediatorDomainConfiguration => EitherT[Future, String, Unit], - storage: Storage, - crypto: Crypto, - topologyComponentFactory: TopologyComponentFactory, - topologyManagerStatusO: Option[TopologyManagerStatus], - maybeDomainTopologyStateInit: Option[DomainTopologyInitializationCallback], - maybeDomainOutboxFactory: Option[DomainOutboxXFactory], - sequencerInfoLoader: SequencerInfoLoader, - ): EitherT[Future, String, MediatorRuntime] = { - val domainId = domainConfig.domainId - val domainLoggerFactory = loggerFactory.append("domainId", domainId.toString) - val domainAlias = DomainAlias(domainConfig.domainId.uid.toLengthLimitedString) - - def getSequencerConnectionFromStore = fetchConfig() - .map(_.map(_.sequencerConnections)) - - for { - _ <- CryptoHandshakeValidator - .validate(domainConfig.domainParameters, config.crypto) - .toEitherT - sequencedEventStore = SequencedEventStore( - storage, - SequencerClientDiscriminator.UniqueDiscriminator, - domainConfig.domainParameters.protocolVersion, - timeouts, - domainLoggerFactory, - ) - sendTrackerStore = SendTrackerStore(storage) - sequencerCounterTrackerStore = SequencerCounterTrackerStore( - storage, - SequencerClientDiscriminator.UniqueDiscriminator, - timeouts, - domainLoggerFactory, - ) - topologyProcessorAndClient <- topologyComponentFactory( - domainId, - domainConfig.domainParameters.protocolVersion, - ) - (topologyProcessor, topologyClient) = topologyProcessorAndClient - _ = ips.add(topologyClient) - syncCryptoApi = new DomainSyncCryptoClient( - mediatorId, - domainId, - 
topologyClient, - crypto, - parameters.cachingConfigs, - timeouts, - futureSupervisor, - domainLoggerFactory, - ) - sequencerClientFactory = SequencerClientFactory( - domainId, - syncCryptoApi, - crypto, - parameters.sequencerClient, - parameters.tracing.propagation, - arguments.testingConfig, - domainConfig.domainParameters, - timeouts, - clock, - topologyClient, - futureSupervisor, - member => - Domain.recordSequencerInteractions - .get() - .lift(member) - .map(Domain.setMemberRecordingPath(member)), - member => - Domain.replaySequencerConfig.get().lift(member).map(Domain.defaultReplayPath(member)), - arguments.metrics.sequencerClient, - parameters.loggingConfig, - domainLoggerFactory, - ProtocolVersionCompatibility.trySupportedProtocolsDomain(parameters), - None, - ) - sequencerClientRef = - GrpcSequencerConnectionService.setup[MediatorDomainConfiguration](mediatorId)( - adminServerRegistry, - fetchConfig, - saveConfig, - Lens[MediatorDomainConfiguration, SequencerConnections](_.sequencerConnections)( - connection => conf => conf.copy(sequencerConnections = connection) - ), - RequestSigner(syncCryptoApi, domainConfig.domainParameters.protocolVersion), - sequencerClientFactory, - sequencerInfoLoader, - domainAlias, - ) - // we wait here until the sequencer becomes active. this allows to reconfigure the - // sequencer client address - connections <- GrpcSequencerConnectionService.waitUntilSequencerConnectionIsValid( - sequencerClientFactory, - this, - futureSupervisor, - getSequencerConnectionFromStore, - ) - info <- sequencerInfoLoader - .loadSequencerEndpoints( - domainAlias, - connections, - ) - .leftMap(_.cause) - requestSigner = RequestSigner(syncCryptoApi, domainConfig.domainParameters.protocolVersion) - _ <- maybeDomainTopologyStateInit match { - case Some(domainTopologyStateInit) => - val headSnapshot = topologyClient.headSnapshot - for { - // TODO(i12076): Request topology information from all sequencers and reconcile - isMediatorActive <- EitherT.right[String](headSnapshot.isMediatorActive(mediatorId)) - _ <- Monad[EitherT[Future, String, *]].whenA(!isMediatorActive)( - sequencerClientFactory - .makeTransport( - info.sequencerConnections.default, - mediatorId, - requestSigner, - allowReplay = false, - ) - .flatMap( - ResourceUtil.withResourceEitherT(_)( - domainTopologyStateInit - .callback(topologyClient, _, domainConfig.domainParameters.protocolVersion) - ) - ) - ) - } yield {} - - case None => EitherT.pure[Future, String](()) - } - - sequencerClient <- sequencerClientFactory.create( - mediatorId, - sequencedEventStore, - sendTrackerStore, - requestSigner, - info.sequencerConnections, - info.expectedSequencers, - ) - - _ = sequencerClientRef.set(sequencerClient) - _ = deferredSequencerClientHealth.set(sequencerClient.healthComponent) - - // can just new up the enterprise mediator factory here as the mediator node is only available in enterprise setups - mediatorRuntime <- mediatorRuntimeFactory.create( - mediatorId, - domainId, - storage, - sequencerCounterTrackerStore, - sequencedEventStore, - sequencerClient, - syncCryptoApi, - topologyClient, - topologyProcessor, - topologyManagerStatusO, - maybeDomainOutboxFactory, - config.timeTracker, - parameters, - domainConfig.domainParameters.protocolVersion, - clock, - arguments.metrics, - futureSupervisor, - domainLoggerFactory, - ) - _ <- mediatorRuntime.start() - } yield mediatorRuntime - } - -} diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeX.scala 
b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeX.scala deleted file mode 100644 index 93c9cf3ed..000000000 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorNodeX.scala +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.domain.mediator - -import cats.data.EitherT -import com.daml.grpc.adapter.ExecutionSequencerFactory -import com.digitalasset.canton.concurrent.ExecutionContextIdlenessExecutorService -import com.digitalasset.canton.config.* -import com.digitalasset.canton.crypto.{Crypto, Fingerprint} -import com.digitalasset.canton.domain.admin.v30.MediatorInitializationServiceGrpc -import com.digitalasset.canton.domain.mediator.admin.gprc.{ - InitializeMediatorRequestX, - InitializeMediatorResponseX, -} -import com.digitalasset.canton.domain.mediator.service.GrpcMediatorInitializationServiceX -import com.digitalasset.canton.domain.mediator.store.{ - MediatorDomainConfiguration, - MediatorDomainConfigurationStore, -} -import com.digitalasset.canton.domain.metrics.MediatorMetrics -import com.digitalasset.canton.environment.* -import com.digitalasset.canton.health.{ComponentStatus, GrpcHealthReporter, HealthService} -import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasCloseContext} -import com.digitalasset.canton.logging.NamedLoggerFactory -import com.digitalasset.canton.resource.Storage -import com.digitalasset.canton.sequencing.client.SequencerClientConfig -import com.digitalasset.canton.store.IndexedStringStore -import com.digitalasset.canton.time.Clock -import com.digitalasset.canton.topology.* -import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit -import com.digitalasset.canton.topology.processing.{ - TopologyTransactionProcessorCommon, - TopologyTransactionProcessorX, -} -import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore -import com.digitalasset.canton.topology.store.TopologyStoreX -import com.digitalasset.canton.tracing.TraceContext -import com.digitalasset.canton.util.SingleUseCell -import com.digitalasset.canton.version.ProtocolVersion -import monocle.macros.syntax.lens.* -import org.apache.pekko.actor.ActorSystem - -import java.util.concurrent.ScheduledExecutorService -import scala.concurrent.Future - -/** Community Mediator Node X configuration that defaults to auto-init - */ -final case class CommunityMediatorNodeXConfig( - override val adminApi: CommunityAdminServerConfig = CommunityAdminServerConfig(), - override val storage: CommunityStorageConfig = CommunityStorageConfig.Memory(), - override val crypto: CommunityCryptoConfig = CommunityCryptoConfig(), - override val init: InitConfig = InitConfig(identity = Some(InitConfigBase.Identity())), - override val timeTracker: DomainTimeTrackerConfig = DomainTimeTrackerConfig(), - override val sequencerClient: SequencerClientConfig = SequencerClientConfig(), - override val caching: CachingConfigs = CachingConfigs(), - override val parameters: MediatorNodeParameterConfig = MediatorNodeParameterConfig(), - override val monitoring: NodeMonitoringConfig = NodeMonitoringConfig(), - override val topology: TopologyConfig = TopologyConfig(), -) extends MediatorNodeConfigCommon( - adminApi, - storage, - crypto, - init, - timeTracker, - sequencerClient, - caching, - parameters, - monitoring, - ) - with ConfigDefaults[DefaultPorts, 
CommunityMediatorNodeXConfig] { - - override val nodeTypeName: String = "mediatorx" - - override def replicationEnabled: Boolean = false - - override def withDefaults(ports: DefaultPorts): CommunityMediatorNodeXConfig = { - this - .focus(_.adminApi.internalPort) - .modify(ports.mediatorAdminApiPort.setDefaultPort) - } -} - -class MediatorNodeBootstrapX( - arguments: CantonNodeBootstrapCommonArguments[ - MediatorNodeConfigCommon, - MediatorNodeParameters, - MediatorMetrics, - ], - protected val replicaManager: MediatorReplicaManager, - override protected val mediatorRuntimeFactory: MediatorRuntimeFactory, -)( - implicit executionContext: ExecutionContextIdlenessExecutorService, - override protected implicit val executionSequencerFactory: ExecutionSequencerFactory, - scheduler: ScheduledExecutorService, - actorSystem: ActorSystem, -) extends CantonNodeBootstrapX[ - MediatorNodeX, - MediatorNodeConfigCommon, - MediatorNodeParameters, - MediatorMetrics, - ](arguments) - with MediatorNodeBootstrapCommon[MediatorNodeX, MediatorNodeConfigCommon] { - - override protected def member(uid: UniqueIdentifier): Member = MediatorId(uid) - - private val domainTopologyManager = new SingleUseCell[DomainTopologyManagerX]() - - override protected def sequencedTopologyStores: Seq[TopologyStoreX[DomainStore]] = - domainTopologyManager.get.map(_.store).toList - - override protected def sequencedTopologyManagers: Seq[DomainTopologyManagerX] = - domainTopologyManager.get.toList - - private class WaitForMediatorToDomainInit( - storage: Storage, - crypto: Crypto, - mediatorId: MediatorId, - authorizedTopologyManager: AuthorizedTopologyManagerX, - healthService: HealthService, - ) extends BootstrapStageWithStorage[MediatorNodeX, StartupNode, DomainId]( - "wait-for-mediator-to-domain-init", - bootstrapStageCallback, - storage, - config.init.autoInit, - ) - with GrpcMediatorInitializationServiceX.Callback { - - adminServerRegistry - .addServiceU( - MediatorInitializationServiceGrpc - .bindService( - new GrpcMediatorInitializationServiceX(this, loggerFactory), - executionContext, - ), - true, - ) - - protected val domainConfigurationStore = - MediatorDomainConfigurationStore(storage, timeouts, loggerFactory) - addCloseable(domainConfigurationStore) - addCloseable(deferredSequencerClientHealth) - - override protected def stageCompleted(implicit - traceContext: TraceContext - ): Future[Option[DomainId]] = domainConfigurationStore.fetchConfiguration.toOption.mapFilter { - case Some(res) => Some(res.domainId) - case None => None - }.value - - override protected def buildNextStage(domainId: DomainId): StartupNode = { - val domainTopologyStore = - TopologyStoreX(DomainStore(domainId), storage, timeouts, loggerFactory) - addCloseable(domainTopologyStore) - - val outboxQueue = new DomainOutboxQueue(loggerFactory) - val topologyManager = new DomainTopologyManagerX( - clock = clock, - crypto = crypto, - store = domainTopologyStore, - outboxQueue = outboxQueue, - enableTopologyTransactionValidation = config.topology.enableTopologyTransactionValidation, - timeouts = timeouts, - futureSupervisor = futureSupervisor, - loggerFactory = loggerFactory, - ) - - if (domainTopologyManager.putIfAbsent(topologyManager).nonEmpty) { - // TODO(#14048) how to handle this error properly? 
- throw new IllegalStateException("domainTopologyManager shouldn't have been set before") - } - - new StartupNode( - storage, - crypto, - mediatorId, - authorizedTopologyManager, - topologyManager, - domainId, - domainConfigurationStore, - domainTopologyStore, - healthService, - ) - } - - override protected def autoCompleteStage() - : EitherT[FutureUnlessShutdown, String, Option[DomainId]] = - EitherT.rightT(None) // this stage doesn't have auto-init - - override def initialize(request: InitializeMediatorRequestX)(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, InitializeMediatorResponseX] = { - if (isInitialized) { - logger.info( - "Received a request to initialize an already initialized mediator. Skipping initialization!" - ) - EitherT.pure(InitializeMediatorResponseX()) - } else { - completeWithExternal { - logger.info( - s"Assigning mediator to ${request.domainId} via sequencers ${request.sequencerConnections}" - ) - domainConfigurationStore - .saveConfiguration( - MediatorDomainConfiguration( - Fingerprint.tryCreate("unused"), // x-nodes do not need to return the initial key - request.domainId, - request.domainParameters, - request.sequencerConnections, - ) - ) - .leftMap(_.toString) - .map(_ => request.domainId) - }.map(_ => InitializeMediatorResponseX()) - } - } - - } - - private class StartupNode( - storage: Storage, - crypto: Crypto, - mediatorId: MediatorId, - authorizedTopologyManager: AuthorizedTopologyManagerX, - domainTopologyManager: DomainTopologyManagerX, - domainId: DomainId, - domainConfigurationStore: MediatorDomainConfigurationStore, - domainTopologyStore: TopologyStoreX[DomainStore], - healthService: HealthService, - ) extends BootstrapStage[MediatorNodeX, RunningNode[MediatorNodeX]]( - description = "Startup mediator node", - bootstrapStageCallback, - ) - with HasCloseContext { - - private val domainLoggerFactory = loggerFactory.append("domainId", domainId.toString) - - override protected def attempt()(implicit - traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, Option[RunningNode[MediatorNodeX]]] = { - - def topologyComponentFactory(domainId: DomainId, protocolVersion: ProtocolVersion): EitherT[ - Future, - String, - (TopologyTransactionProcessorCommon, DomainTopologyClientWithInit), - ] = - EitherT.right( - TopologyTransactionProcessorX.createProcessorAndClientForDomain( - domainTopologyStore, - domainId, - protocolVersion, - crypto.pureCrypto, - arguments.parameterConfig, - config.topology.enableTopologyTransactionValidation, - arguments.clock, - arguments.futureSupervisor, - domainLoggerFactory, - ) - ) - - val domainOutboxFactory = new DomainOutboxXFactory( - domainId = domainId, - memberId = mediatorId, - authorizedTopologyManager = authorizedTopologyManager, - domainTopologyManager = domainTopologyManager, - crypto = crypto, - topologyXConfig = config.topology, - timeouts = timeouts, - loggerFactory = domainLoggerFactory, - futureSupervisor = arguments.futureSupervisor, - ) - performUnlessClosingEitherU("starting up mediator node") { - val indexedStringStore = IndexedStringStore.create( - storage, - parameterConfig.cachingConfigs.indexedStrings, - timeouts, - domainLoggerFactory, - ) - addCloseable(indexedStringStore) - for { - domainId <- initializeNodePrerequisites( - storage, - crypto, - mediatorId, - () => domainConfigurationStore.fetchConfiguration.leftMap(_.toString), - domainConfigurationStore.saveConfiguration(_).leftMap(_.toString), - indexedStringStore, - topologyComponentFactory, - 
Some(TopologyManagerStatus.combined(authorizedTopologyManager, domainTopologyManager)), - maybeDomainTopologyStateInit = Some( - new StoreBasedDomainTopologyInitializationCallback(mediatorId, domainTopologyStore) - ), - maybeDomainOutboxFactory = Some(domainOutboxFactory), - ) - } yield { - val node = new MediatorNodeX( - arguments.config, - mediatorId, - domainId, - replicaManager, - storage, - clock, - domainLoggerFactory, - components = healthService.dependencies.map(_.toComponentStatus), - ) - addCloseable(node) - Some(new RunningNode(bootstrapStageCallback, node)) - } - } - } - } - - override protected def customNodeStages( - storage: Storage, - crypto: Crypto, - nodeId: UniqueIdentifier, - authorizedTopologyManager: AuthorizedTopologyManagerX, - healthServer: GrpcHealthReporter, - healthService: HealthService, - ): BootstrapStageOrLeaf[MediatorNodeX] = { - new WaitForMediatorToDomainInit( - storage, - crypto, - MediatorId(nodeId), - authorizedTopologyManager, - healthService, - ) - } - - override protected def onClosed(): Unit = { - super.onClosed() - } - -} - -object MediatorNodeBootstrapX { - val LoggerFactoryKeyName: String = "mediatorx" -} - -class MediatorNodeX( - config: MediatorNodeConfigCommon, - mediatorId: MediatorId, - domainId: DomainId, - protected[canton] val replicaManager: MediatorReplicaManager, - storage: Storage, - clock: Clock, - loggerFactory: NamedLoggerFactory, - components: => Seq[ComponentStatus], -) extends MediatorNodeCommon( - config, - mediatorId, - domainId, - replicaManager, - storage, - clock, - loggerFactory, - components, - ) { - - override def close(): Unit = { - super.close() - } - -} diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorReplicaManager.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorReplicaManager.scala index 369a43c4a..03c4895df 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorReplicaManager.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorReplicaManager.scala @@ -5,10 +5,10 @@ package com.digitalasset.canton.domain.mediator import cats.data.EitherT import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.domain.admin.v30.MediatorAdministrationServiceGrpc import com.digitalasset.canton.health.admin.data.TopologyQueueStatus import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, FlagCloseableAsync, SyncCloseable} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.mediator.admin.v30.MediatorAdministrationServiceGrpc import com.digitalasset.canton.networking.grpc.{CantonMutableHandlerRegistry, GrpcDynamicService} import com.digitalasset.canton.time.admin.v30 import com.digitalasset.canton.tracing.TraceContext @@ -48,10 +48,8 @@ trait MediatorReplicaManager extends NamedLogging with FlagCloseableAsync { def isActive: Boolean def getTopologyQueueStatus: TopologyQueueStatus = TopologyQueueStatus( - manager = - mediatorRuntime.flatMap(_.mediator.topologyManagerStatusO).map(_.queueSize).getOrElse(0), - dispatcher = - mediatorRuntime.flatMap(_.mediator.domainOutboxStatusO).map(_.queueSize).getOrElse(0), + manager = mediatorRuntime.map(_.mediator.topologyManagerStatus.queueSize).getOrElse(0), + dispatcher = mediatorRuntime.map(_.mediator.domainOutboxStatus.queueSize).getOrElse(0), clients = mediatorRuntime.map(x => x.mediator.topologyClient.numPendingChanges).getOrElse(0), ) diff --git 
a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorRuntimeFactory.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorRuntimeFactory.scala index 1ead796ce..db2c4a66b 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorRuntimeFactory.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/MediatorRuntimeFactory.scala @@ -7,7 +7,6 @@ import cats.data.EitherT import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{DomainTimeTrackerConfig, ProcessingTimeout} import com.digitalasset.canton.crypto.DomainSyncCryptoClient -import com.digitalasset.canton.domain.admin.v30.MediatorAdministrationServiceGrpc import com.digitalasset.canton.domain.mediator.store.{ FinalizedResponseStore, MediatorDeduplicationStore, @@ -15,8 +14,9 @@ import com.digitalasset.canton.domain.mediator.store.{ } import com.digitalasset.canton.domain.metrics.MediatorMetrics import com.digitalasset.canton.environment.CantonNodeParameters -import com.digitalasset.canton.lifecycle.FlagCloseable +import com.digitalasset.canton.lifecycle.{FlagCloseable, Lifecycle} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.mediator.admin.v30.MediatorAdministrationServiceGrpc import com.digitalasset.canton.networking.grpc.StaticGrpcServices import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.sequencing.client.RichSequencerClient @@ -44,7 +44,7 @@ trait MediatorRuntime extends FlagCloseable { def timeService: ServerServiceDefinition def enterpriseAdministrationService: ServerServiceDefinition - def domainOutboxX: Option[DomainOutboxHandle] + def domainOutbox: DomainOutboxHandle def start()(implicit ec: ExecutionContext, @@ -54,20 +54,17 @@ trait MediatorRuntime extends FlagCloseable { // start the domainOutbox only after the mediator has been started, otherwise // the future returned by startup will not be complete, because any topology transactions pushed to the // domain aren't actually processed until after the runtime is up and ... 
running - _ <- domainOutboxX - .map(_.startup().onShutdown(Left("DomainOutbox startup disrupted due to shutdown"))) - .getOrElse(EitherT.rightT[Future, String](())) + _ <- domainOutbox.startup().onShutdown(Left("DomainOutbox startup disrupted due to shutdown")) } yield () override protected def onClosed(): Unit = { - domainOutboxX.foreach(_.close()) - mediator.close() + Lifecycle.close(domainOutbox, mediator)(logger) } } private[mediator] class CommunityMediatorRuntime( override val mediator: Mediator, - override val domainOutboxX: Option[DomainOutboxHandle], + override val domainOutbox: DomainOutboxHandle, override protected val timeouts: ProcessingTimeout, protected val loggerFactory: NamedLoggerFactory, )(implicit protected val ec: ExecutionContext) @@ -95,8 +92,8 @@ trait MediatorRuntimeFactory { syncCrypto: DomainSyncCryptoClient, topologyClient: DomainTopologyClientWithInit, topologyTransactionProcessor: TopologyTransactionProcessorCommon, - topologyManagerStatusO: Option[TopologyManagerStatus], - domainOutboxXFactory: Option[DomainOutboxXFactory], + topologyManagerStatus: TopologyManagerStatus, + domainOutboxXFactory: DomainOutboxXFactory, timeTrackerConfig: DomainTimeTrackerConfig, nodeParameters: CantonNodeParameters, protocolVersion: ProtocolVersion, @@ -122,8 +119,8 @@ object CommunityMediatorRuntimeFactory extends MediatorRuntimeFactory { syncCrypto: DomainSyncCryptoClient, topologyClient: DomainTopologyClientWithInit, topologyTransactionProcessor: TopologyTransactionProcessorCommon, - topologyManagerStatusO: Option[TopologyManagerStatus], - domainOutboxXFactory: Option[DomainOutboxXFactory], + topologyManagerStatus: TopologyManagerStatus, + domainOutboxFactory: DomainOutboxXFactory, timeTrackerConfig: DomainTimeTrackerConfig, nodeParameters: CantonNodeParameters, protocolVersion: ProtocolVersion, @@ -162,8 +159,13 @@ object CommunityMediatorRuntimeFactory extends MediatorRuntimeFactory { loggerFactory, ) - val maybeOutboxX = domainOutboxXFactory - .map(_.create(protocolVersion, topologyClient, sequencerClient, clock, loggerFactory)) + val outbox = domainOutboxFactory.create( + protocolVersion, + topologyClient, + sequencerClient, + clock, + loggerFactory, + ) EitherT.pure[Future, String]( new CommunityMediatorRuntime( new Mediator( @@ -173,8 +175,8 @@ object CommunityMediatorRuntimeFactory extends MediatorRuntimeFactory { topologyClient, syncCrypto, topologyTransactionProcessor, - topologyManagerStatusO, - maybeOutboxX, + topologyManagerStatus, + outbox, timeTrackerConfig, state, sequencerCounterTrackerStore, @@ -185,7 +187,7 @@ object CommunityMediatorRuntimeFactory extends MediatorRuntimeFactory { metrics, loggerFactory, ), - maybeOutboxX, + outbox, nodeParameters.processingTimeouts, loggerFactory, ) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorRequest.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorRequest.scala index 0639a903d..fb7f889ba 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorRequest.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorRequest.scala @@ -3,9 +3,9 @@ package com.digitalasset.canton.domain.mediator.admin.gprc -import com.digitalasset.canton.domain.admin.v30 +import com.digitalasset.canton.mediator.admin.v30 import com.digitalasset.canton.protocol.StaticDomainParameters -import 
com.digitalasset.canton.sequencing.SequencerConnections +import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections} import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.DomainId @@ -14,12 +14,14 @@ final case class InitializeMediatorRequestX( domainId: DomainId, domainParameters: StaticDomainParameters, sequencerConnections: SequencerConnections, + sequencerConnectionValidation: SequencerConnectionValidation, ) { def toProtoV30: v30.InitializeMediatorRequest = v30.InitializeMediatorRequest( domainId.toProtoPrimitive, Some(domainParameters.toProtoV30), Some(sequencerConnections.toProtoV30), + sequencerConnectionValidation.toProtoV30, ) } @@ -31,6 +33,7 @@ object InitializeMediatorRequestX { domainIdP, domainParametersP, sequencerConnectionsPO, + sequencerConnectionValidationPO, ) = requestP for { domainId <- DomainId.fromProtoPrimitive(domainIdP, "domain_id") @@ -40,10 +43,15 @@ object InitializeMediatorRequestX { sequencerConnections <- ProtoConverter .required("sequencerConnections", sequencerConnectionsPO) .flatMap(SequencerConnections.fromProtoV30) + sequencerConnectionValidation <- SequencerConnectionValidation.fromProtoV30( + sequencerConnectionValidationPO + ) + } yield InitializeMediatorRequestX( domainId, domainParameters, sequencerConnections, + sequencerConnectionValidation, ) } } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorResponse.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorResponse.scala index bdf5d0dd1..28ba583a1 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorResponse.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/admin/gprc/InitializeMediatorResponse.scala @@ -3,7 +3,7 @@ package com.digitalasset.canton.domain.mediator.admin.gprc -import com.digitalasset.canton.domain.admin.v30 +import com.digitalasset.canton.mediator.admin.v30 import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult final case class InitializeMediatorResponseX() { diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/service/GrpcMediatorInitializationServiceX.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/service/GrpcMediatorInitializationServiceX.scala index 3cc82150d..597d4ae15 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/service/GrpcMediatorInitializationServiceX.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/mediator/service/GrpcMediatorInitializationServiceX.scala @@ -7,11 +7,6 @@ import cats.data.EitherT import cats.syntax.either.* import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.domain.Domain.FailedToInitialiseDomainNode -import com.digitalasset.canton.domain.admin.v30 -import com.digitalasset.canton.domain.admin.v30.{ - InitializeMediatorRequest, - InitializeMediatorResponse, -} import com.digitalasset.canton.domain.mediator.admin.gprc.{ InitializeMediatorRequestX, InitializeMediatorResponseX, @@ -19,6 +14,11 @@ import com.digitalasset.canton.domain.mediator.admin.gprc.{ import com.digitalasset.canton.error.CantonError import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import 
com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.mediator.admin.v30 +import com.digitalasset.canton.mediator.admin.v30.{ + InitializeMediatorRequest, + InitializeMediatorResponse, +} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeCommon.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeCommon.scala index 7dd80c012..a357c2368 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeCommon.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeCommon.scala @@ -35,7 +35,11 @@ import com.digitalasset.canton.time.* import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit import com.digitalasset.canton.topology.processing.TopologyTransactionProcessorCommon -import com.digitalasset.canton.topology.store.TopologyStateForInitializationService +import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore +import com.digitalasset.canton.topology.store.{ + TopologyStateForInitializationService, + TopologyStoreX, +} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil import io.grpc.ServerServiceDefinition @@ -108,6 +112,7 @@ trait SequencerNodeBootstrapCommon[ domainId: DomainId, sequencerId: SequencerId, staticMembersToRegister: Seq[Member], + topologyStore: TopologyStoreX[DomainStore], topologyClient: DomainTopologyClientWithInit, topologyProcessor: TopologyTransactionProcessorCommon, topologyManagerStatus: Option[TopologyManagerStatus], @@ -158,6 +163,7 @@ trait SequencerNodeBootstrapCommon[ arguments.metrics, domainId, syncCrypto, + topologyStore, topologyClient, topologyProcessor, topologyManagerStatus, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeX.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeX.scala index fc7c1a306..9c4af16c6 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeX.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerNodeX.scala @@ -10,11 +10,10 @@ import com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.config.NonNegativeFiniteDuration as _ import com.digitalasset.canton.crypto.Crypto import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30.SequencerInitializationServiceGrpc import com.digitalasset.canton.domain.metrics.SequencerMetrics import com.digitalasset.canton.domain.sequencing.admin.grpc.{ - InitializeSequencerRequestX, - InitializeSequencerResponseX, + InitializeSequencerRequest, + InitializeSequencerResponse, } import com.digitalasset.canton.domain.sequencing.authentication.MemberAuthenticationServiceFactory import com.digitalasset.canton.domain.sequencing.config.{ @@ -30,11 +29,12 @@ import com.digitalasset.canton.domain.sequencing.service.GrpcSequencerInitializa import com.digitalasset.canton.domain.server.DynamicDomainGrpcServer import com.digitalasset.canton.environment.* import com.digitalasset.canton.health.{ComponentStatus, GrpcHealthReporter, HealthService} -import 
com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasCloseContext, Lifecycle} +import com.digitalasset.canton.lifecycle.{FutureUnlessShutdown, HasCloseContext} import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.protocol.DomainParameters.MaxRequestSize import com.digitalasset.canton.protocol.StaticDomainParameters import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.sequencer.admin.v30.SequencerInitializationServiceGrpc import com.digitalasset.canton.time.* import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.processing.{EffectiveTime, TopologyTransactionProcessorX} @@ -89,8 +89,6 @@ class SequencerNodeBootstrapX( Sequencer, NamedLoggerFactory, ) => Option[ServerServiceDefinition], - // Allow to pass in additional resources that need to be closed as part of the node bootstrap closing - closeables: Seq[AutoCloseable] = Seq.empty, )(implicit executionContext: ExecutionContextIdlenessExecutorService, scheduler: ScheduledExecutorService, @@ -302,14 +300,14 @@ class SequencerNodeBootstrapX( .toMap } - override def initialize(request: InitializeSequencerRequestX)(implicit + override def initialize(request: InitializeSequencerRequest)(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, InitializeSequencerResponseX] = { + ): EitherT[FutureUnlessShutdown, String, InitializeSequencerResponse] = { if (isInitialized) { logger.info( "Received a request to initialize an already initialized sequencer. Skipping initialization!" ) - EitherT.pure(InitializeSequencerResponseX(replicated = config.sequencer.supportsReplicas)) + EitherT.pure(InitializeSequencerResponse(replicated = config.sequencer.supportsReplicas)) } else { completeWithExternalUS { logger.info( @@ -383,7 +381,7 @@ class SequencerNodeBootstrapX( .mapK(FutureUnlessShutdown.outcomeK) } yield (request.domainParameters, sequencerFactory, topologyManager) }.map { _ => - InitializeSequencerResponseX(replicated = config.sequencer.supportsReplicas) + InitializeSequencerResponse(replicated = config.sequencer.supportsReplicas) } } } @@ -502,6 +500,7 @@ class SequencerNodeBootstrapX( domainId, sequencerId, Seq(sequencerId) ++ membersToRegister, + domainTopologyStore, topologyClient, topologyProcessor, Some(TopologyManagerStatus.combined(authorizedTopologyManager, domainTopologyManager)), @@ -548,7 +547,6 @@ class SequencerNodeBootstrapX( (healthService.dependencies ++ sequencerPublicApiHealthService.dependencies).map( _.toComponentStatus ), - closeables, ) addCloseable(node) Some(new RunningNode(bootstrapStageCallback, node)) @@ -568,7 +566,6 @@ class SequencerNodeX( loggerFactory: NamedLoggerFactory, sequencerNodeServer: DynamicDomainGrpcServer, components: => Seq[ComponentStatus], - closeables: Seq[AutoCloseable], )(implicit executionContext: ExecutionContextExecutorService) extends SequencerNodeCommon( config, @@ -579,11 +576,4 @@ class SequencerNodeX( loggerFactory, sequencerNodeServer, components, - ) { - - override def close(): Unit = { - super.close() - // TODO(#17222): Close the additional resources (e.g. 
KMS) last to avoid crypto usage after shutdown - Lifecycle.close(closeables*)(logger) - } -} + ) {} diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala index c773b100d..215b75b31 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntime.scala @@ -8,7 +8,6 @@ import cats.syntax.parallel.* import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.crypto.DomainSyncCryptoClient -import com.digitalasset.canton.domain.admin.v30.SequencerVersionServiceGrpc import com.digitalasset.canton.domain.api.v30 import com.digitalasset.canton.domain.config.PublicServerConfig import com.digitalasset.canton.domain.metrics.SequencerMetrics @@ -40,6 +39,7 @@ import com.digitalasset.canton.protocol.{ StaticDomainParameters, } import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.sequencer.admin.v30.SequencerVersionServiceGrpc import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntimeForSeparateNode.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntimeForSeparateNode.scala index 2d7d82acd..f51f0c978 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntimeForSeparateNode.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/SequencerRuntimeForSeparateNode.scala @@ -10,7 +10,6 @@ import com.digitalasset.canton.DiscardOps import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.{DomainTimeTrackerConfig, TestingConfigInternal} import com.digitalasset.canton.crypto.DomainSyncCryptoClient -import com.digitalasset.canton.domain.admin.v30.SequencerAdministrationServiceGrpc import com.digitalasset.canton.domain.config.PublicServerConfig import com.digitalasset.canton.domain.metrics.SequencerMetrics import com.digitalasset.canton.domain.sequencing.authentication.MemberAuthenticationServiceFactory @@ -24,6 +23,7 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.protocol.StaticDomainParameters import com.digitalasset.canton.protocol.messages.DefaultOpenEnvelope import com.digitalasset.canton.resource.Storage +import com.digitalasset.canton.sequencer.admin.v30.SequencerAdministrationServiceGrpc import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports import com.digitalasset.canton.sequencing.client.* import com.digitalasset.canton.sequencing.handlers.{ @@ -49,7 +49,11 @@ import com.digitalasset.canton.time.{Clock, DomainTimeTracker} import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit import com.digitalasset.canton.topology.processing.TopologyTransactionProcessorCommon -import com.digitalasset.canton.topology.store.TopologyStateForInitializationService +import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore +import com.digitalasset.canton.topology.store.{ + TopologyStateForInitializationService, + 
TopologyStoreX, +} import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.traffic.TrafficControlProcessor import io.grpc.ServerServiceDefinition @@ -73,6 +77,7 @@ class SequencerRuntimeForSeparateNode( metrics: SequencerMetrics, domainId: DomainId, syncCrypto: DomainSyncCryptoClient, + topologyStore: TopologyStoreX[DomainStore], topologyClient: DomainTopologyClientWithInit, topologyProcessor: TopologyTransactionProcessorCommon, topologyManagerStatusO: Option[TopologyManagerStatus], @@ -233,7 +238,15 @@ class SequencerRuntimeForSeparateNode( private val eventHandler = StripSignature(handler(domainId)) private val sequencerAdministrationService = - new GrpcSequencerAdministrationService(sequencer, client, loggerFactory) + new GrpcSequencerAdministrationService( + sequencer, + client, + topologyStore, + topologyClient, + timeTracker, + staticDomainParameters, + loggerFactory, + ) override def registerAdminGrpcServices( register: ServerServiceDefinition => Unit diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerRequest.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerRequest.scala index b12298afa..d401dc640 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerRequest.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerRequest.scala @@ -3,103 +3,12 @@ package com.digitalasset.canton.domain.sequencing.admin.grpc -import cats.syntax.traverse.* -import com.digitalasset.canton.domain.admin.v30 import com.digitalasset.canton.domain.sequencing.sequencer.SequencerSnapshot import com.digitalasset.canton.protocol.StaticDomainParameters -import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult -import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX -import com.digitalasset.canton.topology.store.{ - StoredTopologyTransactionX, - StoredTopologyTransactionsX, -} -import com.digitalasset.canton.topology.transaction.{ - SignedTopologyTransactionX, - TopologyChangeOpX, - TopologyMappingX, -} -import com.google.protobuf.ByteString -final case class InitializeSequencerRequestX( +final case class InitializeSequencerRequest( topologySnapshot: GenericStoredTopologyTransactionsX, domainParameters: StaticDomainParameters, - sequencerSnapshot: Option[SequencerSnapshot] = - None, // this will likely be a different type for X nodes -) { - def toProtoV30: v30.InitializeSequencerRequest = { - v30.InitializeSequencerRequest( - Some(topologySnapshot.toProtoV30), - Some(domainParameters.toProtoV30), - sequencerSnapshot.fold(ByteString.EMPTY)(_.toProtoVersioned.toByteString), - ) - } -} - -object InitializeSequencerRequestX { - - private[sequencing] def fromProtoV30( - request: v30.InitializeSequencerRequest - ): ParsingResult[InitializeSequencerRequestX] = - for { - domainParameters <- ProtoConverter.parseRequired( - StaticDomainParameters.fromProtoV30, - "domain_parameters", - request.domainParameters, - ) - topologySnapshotAddO <- request.topologySnapshot.traverse( - StoredTopologyTransactionsX.fromProtoV30 - ) - snapshotO <- Option - .when(!request.snapshot.isEmpty)( - 
SequencerSnapshot.fromByteString(domainParameters.protocolVersion)( - request.snapshot - ) - ) - .sequence - } yield InitializeSequencerRequestX( - topologySnapshotAddO - .getOrElse(StoredTopologyTransactionsX.empty) - .collectOfType[TopologyChangeOpX.Replace], - domainParameters, - snapshotO, - ) - - private[sequencing] def fromProtoV30( - request: v30.InitializeSequencerVersionedRequest - ): ParsingResult[InitializeSequencerRequestX] = - for { - domainParameters <- ProtoConverter.parseRequired( - StaticDomainParameters.fromProtoV30, - "domain_parameters", - request.domainParameters, - ) - topologySnapshotAdd <- StoredTopologyTransactionsX - .fromByteString(request.topologySnapshot) - - // we need to use the initial time for the sequencer - genesisState = topologySnapshotAdd.result.map(_.transaction) - toStore = StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]( - genesisState.map(signed => - StoredTopologyTransactionX( - SequencedTime(SignedTopologyTransactionX.InitialTopologySequencingTime), - EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime), - None, - signed, - ) - ) - ) - snapshotO <- Option - .when(!request.snapshot.isEmpty)( - SequencerSnapshot.fromByteString(domainParameters.protocolVersion)( - request.snapshot - ) - ) - .sequence - } yield InitializeSequencerRequestX( - toStore, - domainParameters, - snapshotO, - ) -} + sequencerSnapshot: Option[SequencerSnapshot] = None, +) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerResponse.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerResponse.scala index d1112b464..78617233a 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerResponse.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/admin/grpc/InitializeSequencerResponse.scala @@ -3,19 +3,4 @@ package com.digitalasset.canton.domain.sequencing.admin.grpc -import com.digitalasset.canton.domain.admin.v30 -import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult - -final case class InitializeSequencerResponseX(replicated: Boolean) { - def toProtoV30: v30.InitializeSequencerResponse = - v30.InitializeSequencerResponse(replicated) -} - -object InitializeSequencerResponseX { - def fromProtoV30( - response: v30.InitializeSequencerResponse - ): ParsingResult[InitializeSequencerResponseX] = { - val v30.InitializeSequencerResponse(replicated) = response - Right(InitializeSequencerResponseX(replicated)) - } -} +final case class InitializeSequencerResponse(replicated: Boolean) diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationStore.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationStore.scala index 8368bb799..4a0604816 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationStore.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/authentication/MemberAuthenticationStore.scala @@ -98,56 +98,65 @@ class InMemoryMemberAuthenticationStore extends MemberAuthenticationStore { override def saveNonce( storedNonce: StoredNonce - )(implicit traceContext: TraceContext): Future[Unit] = blocking { - lock.synchronized { + )(implicit traceContext: TraceContext): Future[Unit] = { + 
blocking(lock.synchronized { nonces += storedNonce - Future.unit - } + () + }) + Future.unit } override def fetchAndRemoveNonce(member: Member, nonce: Nonce)(implicit traceContext: TraceContext - ): Future[Option[StoredNonce]] = blocking(lock.synchronized { - val storedNonce = nonces.find(n => n.member == member && n.nonce == nonce) - - storedNonce.foreach(nonces.-=) // remove the nonce - + ): Future[Option[StoredNonce]] = { + val storedNonce = blocking(lock.synchronized { + val storedNonce = nonces.find(n => n.member == member && n.nonce == nonce) + storedNonce.foreach(nonces.-=) // remove the nonce + storedNonce + }) Future.successful(storedNonce) - }) + } override def saveToken( token: StoredAuthenticationToken - )(implicit traceContext: TraceContext): Future[Unit] = blocking { - lock.synchronized { + )(implicit traceContext: TraceContext): Future[Unit] = { + blocking(lock.synchronized { tokens += token - Future.unit - } + () + }) + Future.unit } override def fetchTokens(member: Member)(implicit traceContext: TraceContext - ): Future[Seq[StoredAuthenticationToken]] = blocking(lock.synchronized { - val memberTokens = tokens.filter(_.member == member) - - Future.successful(memberTokens.toSeq) - }) + ): Future[Seq[StoredAuthenticationToken]] = { + val memberTokens = blocking(lock.synchronized { + tokens.filter(_.member == member).toSeq + }) + Future.successful(memberTokens) + } override def expireNoncesAndTokens( timestamp: CantonTimestamp - )(implicit traceContext: TraceContext): Future[Unit] = blocking { - lock.synchronized { + )(implicit traceContext: TraceContext): Future[Unit] = { + blocking(lock.synchronized { nonces --= nonces.filter(_.expireAt <= timestamp) tokens --= tokens.filter(_.expireAt <= timestamp) - Future.unit - } + () + }) + Future.unit } - override def invalidateMember(member: Member)(implicit traceContext: TraceContext): Future[Unit] = + override def invalidateMember( + member: Member + )(implicit traceContext: TraceContext): Future[Unit] = { blocking(lock.synchronized { nonces --= nonces.filter(_.member == member) tokens --= tokens.filter(_.member == member) - Future.unit + () }) + Future.unit + } override def close(): Unit = () } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/OnboardingStateForSequencer.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/OnboardingStateForSequencer.scala new file mode 100644 index 000000000..1bdc54cb1 --- /dev/null +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/OnboardingStateForSequencer.scala @@ -0,0 +1,82 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
+// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.domain.sequencing.sequencer + +import com.digitalasset.canton.protocol.StaticDomainParameters +import com.digitalasset.canton.sequencer.admin.v30 +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX +import com.digitalasset.canton.topology.store.StoredTopologyTransactionsX.GenericStoredTopologyTransactionsX +import com.digitalasset.canton.version.* + +final case class OnboardingStateForSequencer( + topologySnapshot: GenericStoredTopologyTransactionsX, + staticDomainParameters: StaticDomainParameters, + sequencerSnapshot: SequencerSnapshot, +)( + override val representativeProtocolVersion: RepresentativeProtocolVersion[ + OnboardingStateForSequencer.type + ] +) extends HasProtocolVersionedWrapper[OnboardingStateForSequencer] { + + override protected val companionObj: OnboardingStateForSequencer.type = + OnboardingStateForSequencer + + private def toProtoV30: v30.OnboardingStateForSequencer = v30.OnboardingStateForSequencer( + Some(topologySnapshot.toProtoV30), + Some(staticDomainParameters.toProtoV30), + Some(sequencerSnapshot.toProtoV30), + ) +} + +object OnboardingStateForSequencer + extends HasProtocolVersionedCompanion[OnboardingStateForSequencer] { + override def name: String = "onboarding state for sequencer" + + def apply( + topologySnapshot: GenericStoredTopologyTransactionsX, + staticDomainParameters: StaticDomainParameters, + sequencerSnapshot: SequencerSnapshot, + protocolVersion: ProtocolVersion, + ): OnboardingStateForSequencer = + OnboardingStateForSequencer(topologySnapshot, staticDomainParameters, sequencerSnapshot)( + protocolVersionRepresentativeFor(protocolVersion) + ) + + override val supportedProtoVersions: SupportedProtoVersions = SupportedProtoVersions( + ProtoVersion(30) -> VersionedProtoConverter(ProtocolVersion.v30)( + v30.OnboardingStateForSequencer + )( + supportedProtoVersion(_)(fromProtoV30), + _.toProtoV30.toByteString, + ) + ) + + private def fromProtoV30( + value: v30.OnboardingStateForSequencer + ): ParsingResult[OnboardingStateForSequencer] = { + for { + topologySnapshot <- ProtoConverter.parseRequired( + StoredTopologyTransactionsX.fromProtoV30, + "topology_snapshot", + value.topologySnapshot, + ) + staticDomainParams <- ProtoConverter.parseRequired( + StaticDomainParameters.fromProtoV30, + "static_domain_parameters", + value.staticDomainParameters, + ) + sequencerSnapshot <- ProtoConverter.parseRequired( + SequencerSnapshot.fromProtoV30, + "sequencer_snapshot", + value.sequencerSnapshot, + ) + rpv <- protocolVersionRepresentativeFor(ProtoVersion(30)) + } yield OnboardingStateForSequencer(topologySnapshot, staticDomainParams, sequencerSnapshot)( + rpv + ) + } + +} diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerPruningStatus.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerPruningStatus.scala index e3a7ca027..afc30c4d2 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerPruningStatus.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerPruningStatus.scala @@ -5,21 +5,38 @@ package com.digitalasset.canton.domain.sequencing.sequencer import cats.syntax.traverse.* import com.digitalasset.canton.data.CantonTimestamp 
-import com.digitalasset.canton.domain.admin.v30 import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.time.NonNegativeFiniteDuration import com.digitalasset.canton.topology.{Member, UnauthenticatedMemberId} +trait AbstractSequencerMemberStatus extends Product with Serializable { + def registeredAt: CantonTimestamp + def lastAcknowledged: Option[CantonTimestamp] + + def safePruningTimestamp: CantonTimestamp = + lastAcknowledged.getOrElse(registeredAt) +} + +final case class InternalSequencerMemberStatus( + override val registeredAt: CantonTimestamp, + override val lastAcknowledged: Option[CantonTimestamp], +) extends AbstractSequencerMemberStatus + with PrettyPrinting { + override def pretty: Pretty[InternalSequencerMemberStatus] = prettyOfClass( + param("registered at", _.registeredAt), + paramIfDefined("last acknowledged", _.lastAcknowledged), + ) +} + final case class SequencerMemberStatus( member: Member, registeredAt: CantonTimestamp, lastAcknowledged: Option[CantonTimestamp], enabled: Boolean = true, -) extends PrettyPrinting { - - def safePruningTimestamp: CantonTimestamp = - lastAcknowledged.getOrElse(registeredAt) +) extends AbstractSequencerMemberStatus + with PrettyPrinting { def toProtoV30: v30.SequencerMemberStatus = v30.SequencerMemberStatus( @@ -46,15 +63,13 @@ final case class SequencerClients( trait AbstractSequencerPruningStatus { - /** the earliest timestamp that can be read */ - def lowerBound: CantonTimestamp + /** Disabled members */ + def disabledClients: SequencerClients - /** details of registered members */ - def members: Seq[SequencerMemberStatus] - - lazy val disabledClients: SequencerClients = SequencerClients( - members = members.filterNot(_.enabled).map(_.member).toSet - ) + /** The earliest [[com.digitalasset.canton.domain.sequencing.sequencer.AbstractSequencerMemberStatus.safePruningTimestamp]] + * of any enabled member + */ + def earliestMemberSafePruningTimestamp: Option[CantonTimestamp] /** Using the member details, calculate based on their acknowledgements when is the latest point we can * safely prune without losing any data that may still be read. 
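A minimal sketch of the pruning-bound rule described by the doc comments above, in plain Scala with hypothetical names and Long timestamps rather than Canton's CantonTimestamp: a member's safe point is its last acknowledgement, falling back to its registration time, and the sequencer-wide bound is the earliest safe point over enabled members, falling back to a caller-supplied timestamp when no enabled member exists.

// Hypothetical member record: when it registered and what it last acknowledged
// (timestamps as epoch microseconds for simplicity).
final case class MemberStatus(
    registeredAt: Long,
    lastAcknowledged: Option[Long],
    enabled: Boolean,
) {
  // Everything up to this point has been read by the member; pruning before it is safe.
  def safePruningTimestamp: Long = lastAcknowledged.getOrElse(registeredAt)
}

// Sequencer-wide bound: earliest safe point of any enabled member,
// or the fallback if there are no enabled members at all.
def safePruningTimestampFor(
    members: Seq[MemberStatus],
    timestampForNoMembers: Long,
): Long =
  members
    .filter(_.enabled)
    .map(_.safePruningTimestamp)
    .minOption
    .getOrElse(timestampForNoMembers)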
@@ -62,24 +77,43 @@ trait AbstractSequencerPruningStatus { * @param timestampForNoMembers The timestamp to return if there are no unignored members */ def safePruningTimestampFor(timestampForNoMembers: CantonTimestamp): CantonTimestamp = { - val earliestMemberTs = members.filter(_.enabled).map(_.safePruningTimestamp).minOption - earliestMemberTs.getOrElse(timestampForNoMembers) + earliestMemberSafePruningTimestamp.getOrElse(timestampForNoMembers) } } private[canton] final case class InternalSequencerPruningStatus( - override val lowerBound: CantonTimestamp, - membersMap: Map[Member, SequencerMemberStatus], + lowerBound: CantonTimestamp, + membersMap: Map[Member, InternalSequencerMemberStatus], + disabledMembers: Set[Member], ) extends AbstractSequencerPruningStatus with PrettyPrinting { - override val members: Seq[SequencerMemberStatus] = membersMap.values.toSeq + override def disabledClients: SequencerClients = SequencerClients(disabledMembers) + + override def earliestMemberSafePruningTimestamp: Option[CantonTimestamp] = + membersMap.view + .filterKeys(!disabledMembers.contains(_)) + .values + .map(_.safePruningTimestamp) + .minOption + + def members: Seq[SequencerMemberStatus] = membersMap.map { + case (member, InternalSequencerMemberStatus(registeredAt, lastAcknowledged)) => + SequencerMemberStatus( + member, + registeredAt, + lastAcknowledged, + !disabledMembers.contains(member), + ) + }.toSeq + def toSequencerPruningStatus(now: CantonTimestamp): SequencerPruningStatus = SequencerPruningStatus(lowerBound, now, members) override def pretty: Pretty[InternalSequencerPruningStatus] = prettyOfClass( param("lower bound", _.lowerBound), - param("members", _.members), + param("members", _.membersMap), + param("disabled", _.disabledMembers), ) } @@ -87,13 +121,19 @@ private[canton] object InternalSequencerPruningStatus { /** Sentinel value to use for Sequencers that don't yet support the status endpoint */ val Unimplemented = - InternalSequencerPruningStatus(CantonTimestamp.MinValue, membersMap = Map.empty) + InternalSequencerPruningStatus(CantonTimestamp.MinValue, membersMap = Map.empty, Set.empty) def apply( lowerBound: CantonTimestamp, members: Seq[SequencerMemberStatus], ): InternalSequencerPruningStatus = { - InternalSequencerPruningStatus(lowerBound, members.map(m => m.member -> m).toMap) + InternalSequencerPruningStatus( + lowerBound, + members + .map(m => m.member -> InternalSequencerMemberStatus(m.registeredAt, m.lastAcknowledged)) + .toMap, + members.view.filterNot(_.enabled).map(_.member).toSet, + ) } } @@ -102,14 +142,21 @@ private[canton] object InternalSequencerPruningStatus { * @param now the current time of the sequencer clock */ final case class SequencerPruningStatus( - override val lowerBound: CantonTimestamp, + lowerBound: CantonTimestamp, now: CantonTimestamp, - override val members: Seq[SequencerMemberStatus], + members: Seq[SequencerMemberStatus], ) extends AbstractSequencerPruningStatus with PrettyPrinting { + override def disabledClients: SequencerClients = SequencerClients( + members = members.filterNot(_.enabled).map(_.member).toSet + ) + + override def earliestMemberSafePruningTimestamp: Option[CantonTimestamp] = + members.filter(_.enabled).map(_.safePruningTimestamp).minOption + def toInternal: InternalSequencerPruningStatus = - InternalSequencerPruningStatus(lowerBound, members.map(m => m.member -> m).toMap) + InternalSequencerPruningStatus(lowerBound, members) /** Using the member details, calculate based on their acknowledgements when is the latest point we can 
* safely prune without losing any data that may still be read. diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerSnapshot.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerSnapshot.scala index 3309c1c38..a293608d1 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerSnapshot.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/SequencerSnapshot.scala @@ -7,10 +7,10 @@ import cats.syntax.either.* import cats.syntax.traverse.* import com.digitalasset.canton.crypto.Signature import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30 import com.digitalasset.canton.domain.sequencing.sequencer.InFlightAggregation.AggregationBySender import com.digitalasset.canton.domain.sequencing.sequencer.traffic.MemberTrafficSnapshot import com.digitalasset.canton.domain.sequencing.traffic.TrafficBalance +import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.sequencing.protocol.{AggregationId, AggregationRule} import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult @@ -104,9 +104,7 @@ object SequencerSnapshot extends HasProtocolVersionedCompanion[SequencerSnapshot additional, trafficState, trafficBalances, - )( - protocolVersionRepresentativeFor(protocolVersion) - ) + )(protocolVersionRepresentativeFor(protocolVersion)) def unimplemented(protocolVersion: ProtocolVersion): SequencerSnapshot = SequencerSnapshot( CantonTimestamp.MinValue, diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala index db9ef6b4e..1cb9cda0b 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencer.scala @@ -14,8 +14,8 @@ import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.block.data.SequencerBlockStore import com.digitalasset.canton.domain.block.{ BlockSequencerStateManagerBase, - BlockUpdateGenerator, - RawLedgerBlock, + BlockUpdateGeneratorImpl, + LocalBlockUpdate, } import com.digitalasset.canton.domain.metrics.SequencerMetrics import com.digitalasset.canton.domain.sequencing.sequencer.PruningError.UnsafePruningPoint @@ -43,7 +43,7 @@ import com.digitalasset.canton.sequencing.client.SequencerClient import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.* -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.traffic.TrafficControlErrors.TrafficControlError import com.digitalasset.canton.traffic.{ MemberTrafficStatus, @@ -51,7 +51,7 @@ import com.digitalasset.canton.traffic.{ TrafficControlErrors, } import com.digitalasset.canton.util.EitherTUtil.condUnitET -import com.digitalasset.canton.util.{EitherTUtil, PekkoUtil} +import com.digitalasset.canton.util.{EitherTUtil, PekkoUtil, SimpleExecutionQueue} import com.digitalasset.canton.version.ProtocolVersion import io.opentelemetry.api.trace.Tracer import org.apache.pekko.stream.* @@ 
-95,6 +95,13 @@ class BlockSequencer( override def timeouts: ProcessingTimeout = processingTimeouts + private[sequencer] val pruningQueue = new SimpleExecutionQueue( + "block-sequencer-pruning-queue", + futureSupervisor, + timeouts, + loggerFactory, + ) + override lazy val rateLimitManager: Option[SequencerRateLimitManager] = Some( blockRateLimitManager ) @@ -104,11 +111,11 @@ class BlockSequencer( override private[sequencing] def firstSequencerCounterServeableForSequencer: SequencerCounter = stateManager.firstSequencerCounterServableForSequencer - noTracingLogger.info( - s"Subscribing to block source from ${stateManager.getHeadState.block.height}" - ) private val (killSwitch, localEventsQueue, done) = { - val updateGenerator = new BlockUpdateGenerator( + val headState = stateManager.getHeadState + noTracingLogger.info(s"Subscribing to block source from ${headState.block.height}") + + val updateGenerator = new BlockUpdateGeneratorImpl( domainId, protocolVersion, cryptoApi, @@ -118,45 +125,25 @@ class BlockSequencer( orderingTimeFixMode, loggerFactory, )(CloseContext(cryptoApi)) + + val driverSource = blockSequencerOps + .subscribe()(TraceContext.empty) + // Explicit async to make sure that the block processing runs in parallel with the block retrieval + .async + .map(updateGenerator.extractBlockEvents) + .via(stateManager.processBlock(updateGenerator)) + + val localSource = Source + .queue[Traced[BlockSequencer.LocalEvent]](bufferSize = 1000, OverflowStrategy.backpressure) + .map(_.map(event => LocalBlockUpdate(event))) + val combinedSource = Source.combineMat(driverSource, localSource)(Merge(_))(Keep.both) + val combinedSourceWithBlockHandling = combinedSource.async + .via(stateManager.applyBlockUpdate) + .map { case Traced(lastTs) => + metrics.sequencerClient.handler.delay.updateValue((clock.now - lastTs).toMillis) + } val ((killSwitch, localEventsQueue), done) = PekkoUtil.runSupervised( ex => logger.error("Fatally failed to handle state changes", ex)(TraceContext.empty), { - val driverSource = blockSequencerOps - .subscribe()(TraceContext.empty) - .map(block => Right(block): Either[BlockSequencer.LocalEvent, RawLedgerBlock]) - val localSource = Source - .queue[BlockSequencer.LocalEvent](bufferSize = 1000, OverflowStrategy.backpressure) - .map(event => Left(event): Either[BlockSequencer.LocalEvent, RawLedgerBlock]) - val combinedSource = Source - .combineMat( - driverSource, - localSource, - )(Merge(_))(Keep.both) - val combinedSourceWithBlockHandling = combinedSource - .mapAsync( - // `stateManager.handleBlock` in `handleBlockContents` must execute sequentially. 
- parallelism = 1 - ) { - case Right(blockEvents) => - implicit val tc: TraceContext = - blockEvents.events.headOption.map(_.traceContext).getOrElse(TraceContext.empty) - logger.debug( - s"Handle block with height=${blockEvents.blockHeight} with num-events=${blockEvents.events.length}" - ) - stateManager - .handleBlock( - updateGenerator.asBlockUpdate(blockEvents) - ) - .map { state => - metrics.sequencerClient.handler.delay - .updateValue((clock.now - state.latestBlock.lastTs).toMillis) - } - .onShutdown( - logger.debug( - s"Block with height=${blockEvents.blockHeight} wasn't handled because sequencer is shutting down" - ) - ) - case Left(localEvent) => stateManager.handleLocalEvent(localEvent)(TraceContext.empty) - } combinedSourceWithBlockHandling.toMat(Sink.ignore)(Keep.both) }, ) @@ -381,24 +368,42 @@ class BlockSequencer( traceContext: TraceContext ): EitherT[Future, PruningError, String] = { - val pruningF = futureSupervisor.supervised( + val (isNew, pruningF) = stateManager.waitForPruningToComplete(requestedTimestamp) + val supervisedPruningF = futureSupervisor.supervised( s"Waiting for local pruning operation at $requestedTimestamp to complete" - )(stateManager.waitForPruningToComplete(requestedTimestamp)) + )(pruningF) - for { - status <- EitherT.right[PruningError](this.pruningStatus) - _ <- condUnitET[Future]( - requestedTimestamp <= status.safePruningTimestamp, - UnsafePruningPoint(requestedTimestamp, status.safePruningTimestamp): PruningError, + if (isNew) + for { + status <- EitherT.right[PruningError](this.pruningStatus) + _ <- condUnitET[Future]( + requestedTimestamp <= status.safePruningTimestamp, + UnsafePruningPoint(requestedTimestamp, status.safePruningTimestamp): PruningError, + ) + msg <- EitherT.right( + pruningQueue + .execute(store.prune(requestedTimestamp), s"pruning sequencer at $requestedTimestamp") + .unwrap + .map( + _.onShutdown(s"pruning at $requestedTimestamp canceled because we're shutting down") + ) + ) + _ <- EitherT.right( + placeLocalEvent(BlockSequencer.UpdateInitialMemberCounters(requestedTimestamp)) + ) + _ <- EitherT.right(supervisedPruningF) + } yield msg + else + EitherT.right( + supervisedPruningF.map(_ => + s"Pruning at $requestedTimestamp is already happening due to an earlier request" + ) ) - _ <- EitherT.right(placeLocalEvent(BlockSequencer.Prune(requestedTimestamp))) - msg <- EitherT.right(pruningF) - } yield msg } private def placeLocalEvent(event: BlockSequencer.LocalEvent)(implicit traceContext: TraceContext - ): Future[Unit] = localEventsQueue.offer(event).flatMap { + ): Future[Unit] = localEventsQueue.offer(Traced(event)).flatMap { case QueueOfferResult.Enqueued => Future.unit case QueueOfferResult.Dropped => // this should never happen Future.failed[Unit](new RuntimeException(s"Request queue is full. 
cannot take local $event")) @@ -427,7 +432,7 @@ class BlockSequencer( for { ledgerStatus <- blockSequencerOps.health isStorageActive = storage.isActive - _ = logger.debug(s"Storage active: ${storage.isActive}") + _ = logger.trace(s"Storage active: ${storage.isActive}") } yield { if (!ledgerStatus.isActive) SequencerHealthStatus(isActive = false, ledgerStatus.description) else @@ -441,6 +446,7 @@ class BlockSequencer( import TraceContext.Implicits.Empty.* logger.debug(s"$name sequencer shutting down") Seq[AsyncOrSyncCloseable]( + SyncCloseable("pruningQueue", pruningQueue.close()), SyncCloseable("stateManager.close()", stateManager.close()), SyncCloseable("localEventsQueue.complete", localEventsQueue.complete()), AsyncCloseable( @@ -461,7 +467,7 @@ class BlockSequencer( traceContext: TraceContext ): FutureUnlessShutdown[Map[Member, TrafficState]] = { upToDateTrafficStatesForMembers( - stateManager.getHeadState.chunk.ephemeral.status.members.map(_.member), + stateManager.getHeadState.chunk.ephemeral.status.membersMap.keySet, Some(clock.now), ).map(_.view.mapValues(_.state).toMap) } @@ -472,7 +478,7 @@ class BlockSequencer( * @param updateTimestamp optionally, timestamp at which to compute the traffic states */ private def upToDateTrafficStatesForMembers( - requestedMembers: Seq[Member], + requestedMembers: Set[Member], updateTimestamp: Option[CantonTimestamp] = None, )(implicit traceContext: TraceContext @@ -484,18 +490,16 @@ class BlockSequencer( case Some(parameters) => // Use the head ephemeral state to get the known traffic states val headEphemeral = stateManager.getHeadState.chunk.ephemeral - val requestedMembersSet = requestedMembers.toSet - // Filter by authenticated, enabled members that have been requested - val knownValidMembers = headEphemeral.status.members.collect { - case SequencerMemberStatus(m @ (_: ParticipantId | _: MediatorId), _, _, true) - if m.isAuthenticated && Option - .when(requestedMembersSet.nonEmpty)(requestedMembersSet) - .forall(_.contains(m)) => + val disabledMembers = headEphemeral.status.disabledMembers + val knownValidMembers = headEphemeral.status.membersMap.keySet.collect { + case m @ (_: ParticipantId | _: MediatorId) + if !disabledMembers.contains(m) && + (requestedMembers.isEmpty || requestedMembers.contains(m)) => m } // Log if we're missing any states - val missingMembers = requestedMembersSet.diff(knownValidMembers.toSet) + val missingMembers = requestedMembers.diff(knownValidMembers) if (missingMembers.nonEmpty) logger.info( s"No traffic state found for the following members: ${missingMembers.mkString(", ")}" @@ -556,7 +560,7 @@ class BlockSequencer( override def trafficStatus(requestedMembers: Seq[Member])(implicit traceContext: TraceContext ): FutureUnlessShutdown[SequencerTrafficStatus] = { - upToDateTrafficStatesForMembers(requestedMembers) + upToDateTrafficStatesForMembers(requestedMembers.toSet) .map { updated => updated.map { case (member, TrafficStateUpdateResult(state, balanceUpdateSerial)) => MemberTrafficStatus( @@ -572,9 +576,7 @@ class BlockSequencer( } object BlockSequencer { - private case object CounterDiscriminator - - sealed trait LocalEvent + sealed trait LocalEvent extends Product with Serializable final case class DisableMember(member: Member) extends LocalEvent - final case class Prune(timestamp: CantonTimestamp) extends LocalEvent + final case class UpdateInitialMemberCounters(timestamp: CantonTimestamp) extends LocalEvent } diff --git 
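For illustration, a self-contained sketch of the deduplication idea visible in the pruning hunk above, in plain Scala with hypothetical names (Canton's SimpleExecutionQueue and state manager are not used here): the first request for a given pruning timestamp starts the actual work, while concurrent requests for the same timestamp just wait on the same completion.

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future, Promise}

// Hypothetical deduplicating pruning coordinator: at most one pruning run per requested timestamp.
final class PruningCoordinator(doPrune: Long => Future[String])(implicit ec: ExecutionContext) {
  private val pending = TrieMap.empty[Long, Promise[String]]

  // Returns (isNew, completion): isNew is true only for the request that starts the work.
  def waitForPruningToComplete(timestamp: Long): (Boolean, Future[String]) = {
    val promise = Promise[String]()
    pending.putIfAbsent(timestamp, promise) match {
      case Some(existing) => (false, existing.future) // an earlier request already covers this point
      case None =>
        promise.completeWith(doPrune(timestamp).andThen { case _ => pending.remove(timestamp) })
        (true, promise.future)
    }
  }
}

In the diff above, the analogous split is that only the isNew branch enqueues store.prune on the pruning queue and places the UpdateInitialMemberCounters local event; repeated requests simply await the supervised future and report that pruning is already in progress.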
a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala index 605bd8500..15bcc8679 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerFactory.scala @@ -198,23 +198,23 @@ abstract class BlockSequencerFactory( if (nodeParameters.useNewTrafficControl) newTrafficBalanceClient else topologyTrafficBalanceClient - val rateLimitManager = makeRateLimitManager(balanceUpdateClient, futureSupervisor) + val rateLimitManager = makeRateLimitManager( + balanceUpdateClient, + futureSupervisor, + ) val domainLoggerFactory = loggerFactory.append("domainId", domainId.toString) - val stateManagerF = { - implicit val closeContext = CloseContext(domainSyncCryptoApi) - BlockSequencerStateManager( - protocolVersion, - domainId, - sequencerId, - store, - nodeParameters.enableAdditionalConsistencyChecks, - nodeParameters.processingTimeouts, - domainLoggerFactory, - rateLimitManager, - ) - } + val stateManagerF = BlockSequencerStateManager( + protocolVersion, + domainId, + sequencerId, + store, + nodeParameters.enableAdditionalConsistencyChecks, + nodeParameters.processingTimeouts, + domainLoggerFactory, + rateLimitManager, + ) for { _ <- balanceManager.initialize diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/MemberTrafficSnapshot.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/MemberTrafficSnapshot.scala index f56cb6c67..ddb684c6e 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/MemberTrafficSnapshot.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/traffic/MemberTrafficSnapshot.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.domain.sequencing.sequencer.traffic import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30.SequencerSnapshot.MemberTrafficSnapshot as MemberTrafficSnapshotP +import com.digitalasset.canton.sequencer.admin.v30.SequencerSnapshot.MemberTrafficSnapshot as MemberTrafficSnapshotP import com.digitalasset.canton.sequencing.protocol.TrafficState import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerAdministrationService.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerAdministrationService.scala index a676c4318..e331ff786 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerAdministrationService.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerAdministrationService.scala @@ -6,20 +6,40 @@ package com.digitalasset.canton.domain.sequencing.service import cats.data.EitherT import cats.syntax.bifunctor.* import cats.syntax.either.* +import cats.syntax.functor.* +import cats.syntax.traverse.* +import com.digitalasset.canton.ProtoDeserializationError +import com.digitalasset.canton.ProtoDeserializationError.FieldNotSet import 
com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30 -import com.digitalasset.canton.domain.admin.v30.{ - SetTrafficBalanceRequest, - SetTrafficBalanceResponse, -} -import com.digitalasset.canton.domain.sequencing.sequencer.Sequencer +import com.digitalasset.canton.domain.sequencing.sequencer.{OnboardingStateForSequencer, Sequencer} import com.digitalasset.canton.error.CantonError import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* +import com.digitalasset.canton.protocol.StaticDomainParameters +import com.digitalasset.canton.sequencer.admin.v30 +import com.digitalasset.canton.sequencer.admin.v30.OnboardingStateRequest.Request +import com.digitalasset.canton.sequencer.admin.v30.{ + SetTrafficBalanceRequest, + SetTrafficBalanceResponse, +} import com.digitalasset.canton.sequencing.client.SequencerClient import com.digitalasset.canton.serialization.ProtoConverter -import com.digitalasset.canton.topology.Member +import com.digitalasset.canton.time.DomainTimeTracker +import com.digitalasset.canton.topology.client.DomainTopologyClient +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore +import com.digitalasset.canton.topology.store.{ + StoredTopologyTransactionX, + StoredTopologyTransactionsX, + TopologyStoreX, +} +import com.digitalasset.canton.topology.transaction.{ + SignedTopologyTransactionX, + TopologyChangeOpX, + TopologyMappingX, +} +import com.digitalasset.canton.topology.{Member, SequencerId} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.EitherTUtil import io.grpc.{Status, StatusRuntimeException} @@ -29,6 +49,10 @@ import scala.concurrent.{ExecutionContext, Future} class GrpcSequencerAdministrationService( sequencer: Sequencer, sequencerClient: SequencerClient, + topologyStore: TopologyStoreX[DomainStore], + topologyClient: DomainTopologyClient, + domainTimeTracker: DomainTimeTracker, + staticDomainParameters: StaticDomainParameters, override val loggerFactory: NamedLoggerFactory, )(implicit executionContext: ExecutionContext @@ -92,6 +116,155 @@ class GrpcSequencerAdministrationService( ) } + override def onboardingState( + request: v30.OnboardingStateRequest + ): Future[v30.OnboardingStateResponse] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + val parseMemberOrTimestamp = request.request match { + case Request.Empty => Left(FieldNotSet("sequencer_id"): ProtoDeserializationError) + case Request.SequencerId(sequencerId) => + SequencerId + .fromProtoPrimitive(sequencerId, "sequencer_id") + .map(Left(_)) + + case Request.Timestamp(referenceEffectiveTime) => + CantonTimestamp.fromProtoTimestamp(referenceEffectiveTime).map(Right(_)) + } + (for { + memberOrTimestamp <- EitherT.fromEither[Future](parseMemberOrTimestamp).leftMap(_.toString) + referenceEffective <- memberOrTimestamp match { + case Left(sequencerId) => + EitherT( + topologyStore + .findFirstSequencerStateForSequencer(sequencerId) + .map(txOpt => + txOpt + .map(stored => stored.validFrom) + .toRight(s"Did not find onboarding topology transaction for $sequencerId") + ) + ) + case Right(timestamp) => + EitherT.rightT[Future, String](EffectiveTime(timestamp)) + } + + _ <- domainTimeTracker + .awaitTick(referenceEffective.value) + 
.map(EitherT.right[String](_).void) + .getOrElse(EitherTUtil.unit[String]) + + /* find the sequencer snapshot that contains a sequenced timestamp that is >= to the reference/onboarding effective time + if we take the sequencing time here, we might miss out topology transactions between sequencerSnapshot.lastTs and effectiveTime + in the following scenario: + t0: onboarding sequenced time + t1: sequencerSnapshot.lastTs + t2: sequenced time of some topology transaction + t3: onboarding effective time + + Therefore, if we find the sequencer snapshot that "contains" the onboarding effective time, + and we then use this snapshot's lastTs as the reference sequenced time for fetching the topology snapshot, + we can be sure that + a) the topology snapshot contains all topology transactions sequenced up to including the onboarding effective time + b) the topology snapshot might contain a few more transactions between the onboarding effective time and the last sequenced time in the block + c) the sequencer snapshot will contain the correct counter for the onboarding sequencer + d) the onboarding sequencer will properly subscribe from its own minimum counter that it gets initialized with from the sequencer snapshot + */ + + sequencerSnapshot <- sequencer.snapshot(referenceEffective.value) + + topologySnapshot <- EitherT.right[String]( + topologyStore.findEssentialStateAtSequencedTime( + SequencedTime(sequencerSnapshot.lastTs), + excludeMappings = Nil, + ) + ) + } yield (topologySnapshot, sequencerSnapshot)) + .fold[v30.OnboardingStateResponse]( + error => + v30.OnboardingStateResponse( + v30.OnboardingStateResponse.Value.Failure( + v30.OnboardingStateResponse.Failure(error) + ) + ), + { case (topologySnapshot, sequencerSnapshot) => + v30.OnboardingStateResponse( + v30.OnboardingStateResponse.Value.Success( + v30.OnboardingStateResponse.Success( + OnboardingStateForSequencer( + topologySnapshot, + staticDomainParameters, + sequencerSnapshot, + staticDomainParameters.protocolVersion, + ).toByteString + ) + ) + ) + }, + ) + } + + override def genesisState(request: v30.GenesisStateRequest): Future[v30.GenesisStateResponse] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + val result = for { + timestampO <- EitherT + .fromEither[Future]( + request.timestamp.traverse(CantonTimestamp.fromProtoTimestamp) + ) + .leftMap(_.toString) + + sequencedTimestamp <- timestampO match { + case Some(value) => EitherT.rightT[Future, String](value) + case None => + val sequencedTimeF = topologyStore + .maxTimestamp() + .collect { + case Some((sequencedTime, _)) => + Right(sequencedTime.value) + + case None => Left("No sequenced time found") + } + + EitherT(sequencedTimeF) + } + + topologySnapshot <- EitherT.right[String]( + topologyStore.findEssentialStateAtSequencedTime( + SequencedTime(sequencedTimestamp), + // we exclude vetted packages from the genesis state because we need to upload them again anyway + excludeMappings = Seq(TopologyMappingX.Code.VettedPackagesX), + ) + ) + // reset effective time and sequenced time if we are initializing the sequencer from the beginning + genesisState: StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX] = + StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]( + topologySnapshot.result.map(stored => + StoredTopologyTransactionX( + SequencedTime(SignedTopologyTransactionX.InitialTopologySequencingTime), + EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime), + stored.validUntil.map(_ => + 
EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime) + ), + stored.transaction, + ) + ) + ) + + } yield genesisState.toByteString(staticDomainParameters.protocolVersion) + + result + .fold[v30.GenesisStateResponse]( + error => + v30.GenesisStateResponse( + v30.GenesisStateResponse.Value.Failure(v30.GenesisStateResponse.Failure(error)) + ), + result => + v30.GenesisStateResponse( + v30.GenesisStateResponse.Value.Success( + v30.GenesisStateResponse.Success(result) + ) + ), + ) + } + override def disableMember( requestP: v30.DisableMemberRequest ): Future[v30.DisableMemberResponse] = { diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerInitializationServiceX.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerInitializationServiceX.scala index 28df2b4c7..58c05fcbd 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerInitializationServiceX.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerInitializationServiceX.scala @@ -7,21 +7,34 @@ import cats.data.EitherT import cats.syntax.either.* import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure import com.digitalasset.canton.domain.Domain.FailedToInitialiseDomainNode -import com.digitalasset.canton.domain.admin.v30.SequencerInitializationServiceGrpc.SequencerInitializationService -import com.digitalasset.canton.domain.admin.v30.{ +import com.digitalasset.canton.domain.sequencing.admin.grpc.{ InitializeSequencerRequest, InitializeSequencerResponse, - InitializeSequencerVersionedRequest, - InitializeSequencerVersionedResponse, -} -import com.digitalasset.canton.domain.sequencing.admin.grpc.{ - InitializeSequencerRequestX, - InitializeSequencerResponseX, } +import com.digitalasset.canton.domain.sequencing.sequencer.OnboardingStateForSequencer import com.digitalasset.canton.error.CantonError import com.digitalasset.canton.lifecycle.FutureUnlessShutdown import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* +import com.digitalasset.canton.protocol.StaticDomainParameters +import com.digitalasset.canton.sequencer.admin.v30.SequencerInitializationServiceGrpc.SequencerInitializationService +import com.digitalasset.canton.sequencer.admin.v30.{ + InitializeSequencerFromGenesisStateRequest, + InitializeSequencerFromGenesisStateResponse, + InitializeSequencerFromOnboardingStateRequest, + InitializeSequencerFromOnboardingStateResponse, +} +import com.digitalasset.canton.serialization.ProtoConverter +import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime} +import com.digitalasset.canton.topology.store.{ + StoredTopologyTransactionX, + StoredTopologyTransactionsX, +} +import com.digitalasset.canton.topology.transaction.{ + SignedTopologyTransactionX, + TopologyChangeOpX, + TopologyMappingX, +} import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import scala.concurrent.{ExecutionContext, Future} @@ -34,47 +47,84 @@ class GrpcSequencerInitializationServiceX( ) extends SequencerInitializationService with NamedLogging { - override def initializeSequencer( - requestP: InitializeSequencerRequest - ): Future[InitializeSequencerResponse] = { + override def initializeSequencerFromGenesisState( + request: InitializeSequencerFromGenesisStateRequest + ): 
Future[InitializeSequencerFromGenesisStateResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - val res: EitherT[Future, CantonError, InitializeSequencerResponse] = for { - request <- EitherT.fromEither[Future]( - InitializeSequencerRequestX - .fromProtoV30(requestP) + + val res: EitherT[Future, CantonError, InitializeSequencerFromGenesisStateResponse] = for { + topologyState <- EitherT.fromEither[Future]( + StoredTopologyTransactionsX + .fromByteString(request.topologySnapshot) + .leftMap(ProtoDeserializationFailure.Wrap(_)) + ) + + domainParameters <- EitherT.fromEither[Future]( + ProtoConverter + .parseRequired( + StaticDomainParameters.fromProtoV30, + "domain_parameters", + request.domainParameters, + ) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) + // TODO(i17940): Remove this when we have a method to distinguish between initialization during an upgrade and initialization during the bootstrap of a domain + // reset effective time and sequenced time if we are initializing the sequencer from the beginning + genesisState: StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX] = + StoredTopologyTransactionsX[TopologyChangeOpX, TopologyMappingX]( + topologyState.result.map(stored => + StoredTopologyTransactionX( + SequencedTime(SignedTopologyTransactionX.InitialTopologySequencingTime), + EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime), + stored.validUntil.map(_ => + EffectiveTime(SignedTopologyTransactionX.InitialTopologySequencingTime) + ), + stored.transaction, + ) + ) + ) + + initializeRequest = InitializeSequencerRequest(genesisState, domainParameters, None) result <- handler - .initialize(request) + .initialize(initializeRequest) .leftMap(FailedToInitialiseDomainNode.Failure(_)) .onShutdown(Left(FailedToInitialiseDomainNode.Shutdown())): EitherT[ Future, CantonError, - InitializeSequencerResponseX, + InitializeSequencerResponse, ] - } yield result.toProtoV30 + } yield InitializeSequencerFromGenesisStateResponse(result.replicated) mapErrNew(res) } - override def initializeSequencerVersioned( - requestP: InitializeSequencerVersionedRequest - ): Future[InitializeSequencerVersionedResponse] = { + override def initializeSequencerFromOnboardingState( + request: InitializeSequencerFromOnboardingStateRequest + ): Future[InitializeSequencerFromOnboardingStateResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - val res: EitherT[Future, CantonError, InitializeSequencerVersionedResponse] = for { - request <- EitherT.fromEither[Future]( - InitializeSequencerRequestX - .fromProtoV30(requestP) + val res: EitherT[Future, CantonError, InitializeSequencerFromOnboardingStateResponse] = for { + onboardingState <- EitherT.fromEither[Future]( + OnboardingStateForSequencer + // according to @rgugliel-da, this is safe to do here. 
+ // the caller of this endpoint could get the onboarding state from various sequencers + // and compare them for byte-for-byte equality, to increase the confidence that this + // is safe to deserialize + .fromByteStringUnsafe(request.onboardingState) .leftMap(ProtoDeserializationFailure.Wrap(_)) ) + initializeRequest = InitializeSequencerRequest( + onboardingState.topologySnapshot, + onboardingState.staticDomainParameters, + Some(onboardingState.sequencerSnapshot), + ) result <- handler - .initialize(request) + .initialize(initializeRequest) .leftMap(FailedToInitialiseDomainNode.Failure(_)) .onShutdown(Left(FailedToInitialiseDomainNode.Shutdown())): EitherT[ Future, CantonError, - InitializeSequencerResponseX, + InitializeSequencerResponse, ] - } yield InitializeSequencerVersionedResponse(result.replicated) + } yield InitializeSequencerFromOnboardingStateResponse(result.replicated) mapErrNew(res) } @@ -82,8 +132,8 @@ class GrpcSequencerInitializationServiceX( object GrpcSequencerInitializationServiceX { trait Callback { - def initialize(request: InitializeSequencerRequestX)(implicit + def initialize(request: InitializeSequencerRequest)(implicit traceContext: TraceContext - ): EitherT[FutureUnlessShutdown, String, InitializeSequencerResponseX] + ): EitherT[FutureUnlessShutdown, String, InitializeSequencerResponse] } } diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerPruningAdministrationService.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerPruningAdministrationService.scala index ef987fdd1..a115bdf5f 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerPruningAdministrationService.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerPruningAdministrationService.scala @@ -9,10 +9,10 @@ import com.digitalasset.canton.admin.grpc.{GrpcPruningScheduler, HasPruningSched import com.digitalasset.canton.admin.pruning.v30.LocatePruningTimestamp import com.digitalasset.canton.config.RequireTypes.PositiveInt import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30 import com.digitalasset.canton.domain.sequencing.sequencer.{PruningError, Sequencer} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.scheduler.PruningScheduler +import com.digitalasset.canton.sequencer.admin.v30 import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} import com.digitalasset.canton.util.EitherTUtil diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerVersionService.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerVersionService.scala index 34dd1c513..50b01f4c6 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerVersionService.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/service/GrpcSequencerVersionService.scala @@ -3,9 +3,9 @@ package com.digitalasset.canton.domain.sequencing.service -import com.digitalasset.canton.domain.admin.v30.SequencerVersion -import com.digitalasset.canton.domain.admin.v30.SequencerVersionServiceGrpc.SequencerVersionService import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import 
com.digitalasset.canton.sequencer.admin.v30.SequencerVersion +import com.digitalasset.canton.sequencer.admin.v30.SequencerVersionServiceGrpc.SequencerVersionService import com.digitalasset.canton.version.ProtocolVersion import scala.concurrent.Future diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/TrafficBalance.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/TrafficBalance.scala index ed288c6ca..1477c0fef 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/TrafficBalance.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/sequencing/traffic/TrafficBalance.scala @@ -5,8 +5,8 @@ package com.digitalasset.canton.domain.sequencing.traffic import com.digitalasset.canton.config.RequireTypes.{NonNegativeLong, PositiveInt} import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.admin.v30.SequencerSnapshot.TrafficBalance as TrafficBalanceP import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} +import com.digitalasset.canton.sequencer.admin.v30.SequencerSnapshot.TrafficBalance as TrafficBalanceP import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.Member diff --git a/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala b/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala index 372f86d2f..7ca1b8831 100644 --- a/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala +++ b/community/domain/src/main/scala/com/digitalasset/canton/domain/service/GrpcSequencerConnectionService.scala @@ -8,16 +8,12 @@ import cats.syntax.either.* import com.daml.grpc.adapter.ExecutionSequencerFactory import com.digitalasset.canton.DomainAlias import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader +import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader.SequencerAggregatedInfo import com.digitalasset.canton.concurrent.FutureSupervisor -import com.digitalasset.canton.domain.admin.v30 -import com.digitalasset.canton.domain.admin.v30.SequencerConnectionServiceGrpc.SequencerConnectionService -import com.digitalasset.canton.lifecycle.{ - CloseContext, - FlagCloseable, - FutureUnlessShutdown, - PromiseUnlessShutdown, -} +import com.digitalasset.canton.lifecycle.{CloseContext, FlagCloseable, PromiseUnlessShutdown} import com.digitalasset.canton.logging.ErrorLoggingContext +import com.digitalasset.canton.mediator.admin.v30 +import com.digitalasset.canton.mediator.admin.v30.SequencerConnectionServiceGrpc.SequencerConnectionService import com.digitalasset.canton.networking.grpc.CantonMutableHandlerRegistry import com.digitalasset.canton.sequencing.client.SequencerClient.SequencerTransports import com.digitalasset.canton.sequencing.client.{ @@ -27,7 +23,7 @@ import com.digitalasset.canton.sequencing.client.{ } import com.digitalasset.canton.sequencing.{ GrpcSequencerConnection, - SequencerConnection, + SequencerConnectionValidation, SequencerConnections, } import com.digitalasset.canton.serialization.ProtoConverter @@ -46,7 +42,7 @@ class GrpcSequencerConnectionService( fetchConnection: () => EitherT[Future, String, Option[ SequencerConnections ]], - setConnection: SequencerConnections => EitherT[ + setConnection: 
(SequencerConnectionValidation, SequencerConnections) => EitherT[ Future, String, Unit, @@ -69,8 +65,16 @@ class GrpcSequencerConnectionService( EitherTUtil.toFuture(for { existing <- getConnection requestedReplacement <- parseConnection(request) - _ <- validateReplacement(existing, requestedReplacement) - _ <- setConnection(requestedReplacement) + _ <- validateReplacement( + existing, + requestedReplacement, + ) + validation <- EitherT.fromEither[Future]( + SequencerConnectionValidation + .fromProtoV30(request.sequencerConnectionValidation) + .leftMap(err => Status.INVALID_ARGUMENT.withDescription(err.message).asException()) + ) + _ <- setConnection(validation, requestedReplacement) .leftMap(error => Status.FAILED_PRECONDITION.withDescription(error).asException()) } yield v30.SetConnectionResponse()) @@ -93,8 +97,7 @@ class GrpcSequencerConnectionService( private def parseConnection( request: v30.SetConnectionRequest ): EitherT[Future, StatusException, SequencerConnections] = { - val v30.SetConnectionRequest(sequencerConnectionsPO) = request - + val v30.SetConnectionRequest(sequencerConnectionsPO, validation) = request ProtoConverter .required("sequencerConnections", sequencerConnectionsPO) .flatMap(SequencerConnections.fromProtoV30) @@ -141,7 +144,6 @@ object GrpcSequencerConnectionService { executionServiceFactory: ExecutionSequencerFactory, materializer: Materializer, traceContext: TraceContext, - errorLoggingContext: ErrorLoggingContext, closeContext: CloseContext, ): UpdateSequencerClient = { val clientO = new AtomicReference[Option[RichSequencerClient]](None) @@ -149,7 +151,7 @@ object GrpcSequencerConnectionService { SequencerConnectionService.bindService( new GrpcSequencerConnectionService( fetchConnection = () => fetchConfig().map(_.map(sequencerConnectionLens.get)), - setConnection = newSequencerConnection => + setConnection = (sequencerConnectionValidation, newSequencerConnection) => for { currentConfig <- fetchConfig() newConfig <- currentConfig.fold( @@ -159,17 +161,14 @@ object GrpcSequencerConnectionService { )(config => EitherT.rightT(sequencerConnectionLens.replace(newSequencerConnection)(config)) ) - // validate connection before making transport (as making transport will hang if the connection - // is invalid) - _ <- transportFactory - .validateTransport( - newSequencerConnection, - logWarning = false, - ) - .onShutdown(Left("Aborting due to shutdown")) + // load and potentially validate the new connection newEndpointsInfo <- sequencerInfoLoader - .loadSequencerEndpoints(domainAlias, newSequencerConnection) + .loadAndAggregateSequencerEndpoints( + domainAlias, + newSequencerConnection, + sequencerConnectionValidation, + ) .leftMap(_.cause) sequencerTransportsMap <- transportFactory @@ -210,7 +209,7 @@ object GrpcSequencerConnectionService { } def waitUntilSequencerConnectionIsValid( - factory: SequencerClientTransportFactory, + sequencerInfoLoader: SequencerInfoLoader, flagCloseable: FlagCloseable, futureSupervisor: FutureSupervisor, loadConfig: => EitherT[Future, String, Option[ @@ -220,23 +219,30 @@ object GrpcSequencerConnectionService { errorLoggingContext: ErrorLoggingContext, traceContext: TraceContext, executionContext: ExecutionContextExecutor, - ): EitherT[Future, String, SequencerConnections] = { + ): EitherT[Future, String, SequencerAggregatedInfo] = { val promise = - new PromiseUnlessShutdown[Either[String, SequencerConnection]]( + new PromiseUnlessShutdown[Either[String, SequencerAggregatedInfo]]( "wait-for-valid-connection", futureSupervisor, ) 
flagCloseable.runOnShutdown_(promise) implicit val closeContext = CloseContext(flagCloseable) + val alias = DomainAlias.tryCreate("domain") - def tryNewConfig: EitherT[FutureUnlessShutdown, String, SequencerConnections] = { - flagCloseable - .performUnlessClosingEitherU("load config")(loadConfig) + def tryNewConfig: EitherT[Future, String, SequencerAggregatedInfo] = { + loadConfig .flatMap { case Some(settings) => - factory - .validateTransport(settings, logWarning = true) - .map(_ => settings) + sequencerInfoLoader + .loadAndAggregateSequencerEndpoints( + alias, + settings, + SequencerConnectionValidation.Active, + ) + .leftMap { e => + errorLoggingContext.logger.warn(s"Waiting for valid sequencer connection ${e}") + e.toString + } case None => EitherT.leftT("No sequencer connection config") } } @@ -250,11 +256,10 @@ object GrpcSequencerConnectionService { delay = 50.millis, operationName = "wait-for-valid-sequencer-connection", ) - .unlessShutdown( + .apply( tryNewConfig.value, NoExnRetryable, ) - .onShutdown(Left("Aborting due to shutdown")) ) } diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/EphemeralStateTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/EphemeralStateTest.scala deleted file mode 100644 index 4054366bd..000000000 --- a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/EphemeralStateTest.scala +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.domain.sequencing.integrations.state - -import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.domain.block.data.EphemeralState -import com.digitalasset.canton.domain.sequencing.sequencer.{ - InternalSequencerPruningStatus, - SequencerMemberStatus, -} -import com.digitalasset.canton.topology.ParticipantId -import com.digitalasset.canton.{BaseTest, SequencerCounter} -import org.scalatest.wordspec.AnyWordSpec - -class EphemeralStateTest extends AnyWordSpec with BaseTest { - private val t1 = CantonTimestamp.Epoch.plusSeconds(1) - private val alice = ParticipantId("alice") - private val bob = ParticipantId("bob") - private val carlos = ParticipantId("carlos") - - "nextCounters" should { - "throw error if member is not registered" in { - val state = EphemeralState( - Map.empty, - Map.empty, - InternalSequencerPruningStatus( - t1, - Seq(SequencerMemberStatus(alice, t1, None), SequencerMemberStatus(bob, t1, None)), - ), - ) - an[IllegalArgumentException] should be thrownBy state.tryNextCounters(Set(carlos)) - } - - "increment existing counters and otherwise use genesis for members without an existing counter" in { - val counters = EphemeralState( - Map(alice -> SequencerCounter(2)), - Map.empty, - InternalSequencerPruningStatus( - t1, - Seq(SequencerMemberStatus(alice, t1, None), SequencerMemberStatus(bob, t1, None)), - ), - ) - .tryNextCounters(Set(alice, bob)) - - counters should contain.only( - alice -> SequencerCounter(3), - bob -> SequencerCounter.Genesis, - ) - } - } -} diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/SequencerStateManagerStoreTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/SequencerStateManagerStoreTest.scala index 964435ab2..2875a1a9d 100644 --- 
a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/SequencerStateManagerStoreTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/integrations/state/SequencerStateManagerStoreTest.scala @@ -113,7 +113,7 @@ trait SequencerStateManagerStoreTest head <- store.readAtBlockTimestamp(CantonTimestamp.Epoch) } yield { head.registeredMembers shouldBe empty - head.heads shouldBe empty + head.checkpoints shouldBe empty } } @@ -140,13 +140,13 @@ trait SequencerStateManagerStoreTest head <- store.readAtBlockTimestamp(t2) } yield { stateAtT1.registeredMembers should contain.only(alice, bob) - stateAtT1.heads(alice) shouldBe SequencerCounter(0) - stateAtT1.heads.keys should contain only alice + stateAtT1.headCounter(alice) should contain(SequencerCounter(0)) + stateAtT1.checkpoints.keys should contain only alice head.registeredMembers should contain.only(alice, bob, carlos) - head.heads(alice) shouldBe SequencerCounter(1) - head.heads(bob) shouldBe SequencerCounter(0) - head.heads.keys should not contain carlos + head.headCounter(alice) should contain(SequencerCounter(1)) + head.headCounter(bob) should contain(SequencerCounter(0)) + head.checkpoints.keys should not contain carlos } } @@ -188,15 +188,15 @@ trait SequencerStateManagerStoreTest head <- store.readAtBlockTimestamp(t2) } yield { stateAtT1.registeredMembers should contain.only(alice, bob) - stateAtT1.heads(alice) shouldBe SequencerCounter(0) - stateAtT1.heads.keys should contain only alice + stateAtT1.headCounter(alice) should contain(SequencerCounter(0)) + stateAtT1.checkpoints.keys should contain only alice stateAtT1.trafficState(alice) shouldBe trafficStateAlice stateAtT1.trafficState.keys should contain only alice head.registeredMembers should contain.only(alice, bob, carlos) - head.heads(alice) shouldBe SequencerCounter(1) - head.heads(bob) shouldBe SequencerCounter(0) - head.heads.keys should not contain carlos + head.headCounter(alice) should contain(SequencerCounter(1)) + head.headCounter(bob) should contain(SequencerCounter(0)) + head.checkpoints.keys should not contain carlos head.trafficState(alice) shouldBe trafficStateAlice2 head.trafficState(bob) shouldBe trafficStateBob head.trafficState.keys should not contain carlos @@ -217,7 +217,7 @@ trait SequencerStateManagerStoreTest head <- store.readAtBlockTimestamp(t2) } yield { head.registeredMembers should contain.only(alice, bob) - head.heads(alice) shouldBe SequencerCounter(0) + head.headCounter(alice) should contain(SequencerCounter(0)) } } @@ -323,7 +323,7 @@ trait SequencerStateManagerStoreTest _ <- store.addMember(alice, t1) state <- store.readAtBlockTimestamp(t1) } yield { - state.heads.get(alice) should be(None) + state.headCounter(alice) shouldBe None } } } @@ -352,7 +352,7 @@ trait SequencerStateManagerStoreTest _ <- store.addEvents(Map(bob -> send(bob, SequencerCounter(0), t2, message)), Map.empty) _ <- store.addEvents(Map(bob -> send(bob, SequencerCounter(1), t3, message)), Map.empty) state <- store.readAtBlockTimestamp(t3) - } yield state.heads(bob) shouldBe SequencerCounter(1) + } yield state.headCounter(bob) should contain(SequencerCounter(1)) } } diff --git a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala index 21fc0080d..87b0bbb1b 100644 --- 
a/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala +++ b/community/domain/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/block/BlockSequencerTest.scala @@ -15,15 +15,14 @@ import com.digitalasset.canton.crypto.DomainSyncCryptoClient import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.block.BlockSequencerStateManager.ChunkState import com.digitalasset.canton.domain.block.data.memory.InMemorySequencerBlockStore -import com.digitalasset.canton.domain.block.data.{ - BlockEphemeralState, - BlockInfo, - BlockUpdateClosureWithHeight, - EphemeralState, -} +import com.digitalasset.canton.domain.block.data.{BlockEphemeralState, BlockInfo, EphemeralState} import com.digitalasset.canton.domain.block.{ + BlockEvents, BlockSequencerStateManager, BlockSequencerStateManagerBase, + BlockUpdate, + BlockUpdateGenerator, + OrderedBlockUpdate, RawLedgerBlock, SequencerDriverHealthStatus, } @@ -35,7 +34,7 @@ import com.digitalasset.canton.domain.sequencing.sequencer.errors.{ } import com.digitalasset.canton.domain.sequencing.traffic.RateLimitManagerTesting import com.digitalasset.canton.domain.sequencing.traffic.store.memory.InMemoryTrafficBalanceStore -import com.digitalasset.canton.lifecycle.{AsyncOrSyncCloseable, FutureUnlessShutdown} +import com.digitalasset.canton.lifecycle.AsyncOrSyncCloseable import com.digitalasset.canton.logging.TracedLogger import com.digitalasset.canton.logging.pretty.CantonPrettyPrinter import com.digitalasset.canton.resource.MemoryStorage @@ -60,10 +59,11 @@ import com.digitalasset.canton.topology.processing.{ import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore import com.digitalasset.canton.topology.store.ValidatedTopologyTransactionX import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreX -import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.tracing.{TraceContext, Traced} import com.digitalasset.canton.{BaseTest, HasExecutionContext, SequencerCounter} +import org.apache.pekko.NotUsed import org.apache.pekko.actor.ActorSystem -import org.apache.pekko.stream.scaladsl.{Keep, Source} +import org.apache.pekko.stream.scaladsl.{Flow, Keep, Source} import org.apache.pekko.stream.{KillSwitch, KillSwitches, Materializer} import org.scalatest.wordspec.AsyncWordSpec @@ -77,7 +77,7 @@ class BlockSequencerTest with RateLimitManagerTesting { "BlockSequencer" should { - "process a lot of blocks during catch up" in withEnv() { implicit env => + "process a lot of blocks during catch up" in withEnv { implicit env => env.fakeBlockSequencerOps.completed.future.map(_ => succeed) } } @@ -87,16 +87,14 @@ class BlockSequencerTest private val N = 1_000_000 - private def withEnv[T]( - initial: Option[BlockEphemeralState] = None - )(test: Environment => Future[T]): Future[T] = { - val env = new Environment(initial) + private def withEnv[T](test: Environment => Future[T]): Future[T] = { + val env = new Environment complete { test(env) } lastly env.close() } - private class Environment(initial: Option[BlockEphemeralState] = None) extends AutoCloseable { + private class Environment extends AutoCloseable { private val actorSystem = ActorSystem() implicit val materializer: Materializer = Materializer(actorSystem) @@ -149,10 +147,6 @@ class BlockSequencerTest private val store = new InMemorySequencerBlockStore(None, loggerFactory) - Await.result( - initial.fold(Future.unit)(store.setInitialState(_, None)), - 1.second, - 
) private val balanceStore = new InMemoryTrafficBalanceStore(loggerFactory) @@ -237,12 +231,13 @@ class BlockSequencerTest override val maybeLowerTopologyTimestampBound: Option[CantonTimestamp] = None - override def handleBlock( - updateClosure: BlockUpdateClosureWithHeight - ): FutureUnlessShutdown[BlockEphemeralState] = - FutureUnlessShutdown.pure( - BlockEphemeralState(BlockInfo.initial, EphemeralState.empty) - ) // Discarded anyway + override def processBlock( + bug: BlockUpdateGenerator + ): Flow[BlockEvents, Traced[OrderedBlockUpdate], NotUsed] = + Flow[BlockEvents].mapConcat(_ => Seq.empty) + + override def applyBlockUpdate: Flow[Traced[BlockUpdate], Traced[CantonTimestamp], NotUsed] = + Flow[Traced[BlockUpdate]].map(_.map(_ => CantonTimestamp.MinValue)) override def getHeadState: BlockSequencerStateManager.HeadState = BlockSequencerStateManager.HeadState( @@ -261,15 +256,7 @@ class BlockSequencerTest ): CreateSubscription = ??? override private[domain] def firstSequencerCounterServableForSequencer : com.digitalasset.canton.SequencerCounter = ??? - override def handleLocalEvent( - event: com.digitalasset.canton.domain.sequencing.sequencer.block.BlockSequencer.LocalEvent - )(implicit - traceContext: com.digitalasset.canton.tracing.TraceContext - ): scala.concurrent.Future[Unit] = ??? override def isMemberEnabled(member: com.digitalasset.canton.topology.Member): Boolean = ??? - override def pruneLocalDatabase(timestamp: com.digitalasset.canton.data.CantonTimestamp)( - implicit traceContext: com.digitalasset.canton.tracing.TraceContext - ): scala.concurrent.Future[Unit] = ??? override def waitForAcknowledgementToComplete( member: com.digitalasset.canton.topology.Member, timestamp: com.digitalasset.canton.data.CantonTimestamp, @@ -281,6 +268,6 @@ class BlockSequencerTest ): scala.concurrent.Future[Unit] = ??? override def waitForPruningToComplete( timestamp: com.digitalasset.canton.data.CantonTimestamp - ): scala.concurrent.Future[String] = ??? + ): (Boolean, Future[Unit]) = ??? 
} } diff --git a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/CommunityReferenceBlockOrdererFactory.scala b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/CommunityReferenceBlockOrdererFactory.scala index 8f38c7561..5ce0c4dcd 100644 --- a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/CommunityReferenceBlockOrdererFactory.scala +++ b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/CommunityReferenceBlockOrdererFactory.scala @@ -12,6 +12,7 @@ import com.digitalasset.canton.config.{ ConnectionAllocation, DbParametersConfig, ProcessingTimeout, + QueryCostMonitoringConfig, StorageConfig, } import com.digitalasset.canton.data.CantonTimestamp @@ -64,6 +65,8 @@ class CommunityReferenceBlockOrdererFactory extends BlockOrdererFactory { deriveReader[CommunityDbConfig.Postgres] implicit val communityStorageConfigReader: ConfigReader[CommunityStorageConfig] = deriveReader[CommunityStorageConfig] + implicit val queryCostMonitoringConfigReader: ConfigReader[QueryCostMonitoringConfig] = + deriveReader[QueryCostMonitoringConfig] deriveReader[ConfigType] } @@ -94,6 +97,8 @@ class CommunityReferenceBlockOrdererFactory extends BlockOrdererFactory { deriveWriter[CommunityDbConfig.Postgres] implicit val communityStorageConfigWriter: ConfigWriter[CommunityStorageConfig] = deriveWriter[CommunityStorageConfig] + implicit val queryCostMonitoringConfigWriter: ConfigWriter[QueryCostMonitoringConfig] = + deriveWriter[QueryCostMonitoringConfig] deriveWriter[ConfigType] } @@ -160,7 +165,7 @@ object CommunityReferenceBlockOrdererFactory { new CommunityStorageFactory(communityStorageConfig) .tryCreate( connectionPoolForParticipant = false, - logQueryCost = None, + logQueryCost = config.logQueryCost, clock = clock, scheduler = None, metrics = new DbStorageMetrics(MetricName("none"), NoOpMetricsFactory)( diff --git a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/ReferenceBlockOrderer.scala b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/ReferenceBlockOrderer.scala index 5da7b477e..4cda59869 100644 --- a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/ReferenceBlockOrderer.scala +++ b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/ReferenceBlockOrderer.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.domain.sequencing.sequencer.reference import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config -import com.digitalasset.canton.config.{ProcessingTimeout, StorageConfig} +import com.digitalasset.canton.config.{ProcessingTimeout, QueryCostMonitoringConfig, StorageConfig} import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.domain.block.BlockOrderingSequencer.BatchTag import com.digitalasset.canton.domain.block.{ @@ -75,37 +75,33 @@ class ReferenceBlockOrderer( (), ) .viaMat(KillSwitches.single)(Keep.right) - .mapAsync(1)(_ => store.countBlocks().map(_ - 1L)) .scanAsync( - (fromHeight - 1L, Seq[BlockOrderer.Block]()) - ) { case ((lastHeight, _), currentHeight) => + fromHeight -> Seq[BlockOrderer.Block]() + ) { case ((nextFromHeight, _), _tick) => for { newBlocks <- - if (currentHeight > lastHeight) { 
- store.queryBlocks(lastHeight + 1L).map { timestampedBlocks => - val blocks = timestampedBlocks.map(_.block) - if (logger.underlying.isDebugEnabled()) { - logger.debug( - s"New blocks (${blocks.length}) at heights ${lastHeight + 1} to $currentHeight, specifically at ${blocks.map(_.blockHeight).mkString(",")}" + store.queryBlocks(nextFromHeight).map { timestampedBlocks => + val blocks = timestampedBlocks.map(_.block) + if (logger.underlying.isDebugEnabled() && blocks.nonEmpty) { + logger.debug( + s"New blocks (${blocks.length}) starting at height $nextFromHeight, specifically at ${blocks.map(_.blockHeight).mkString(",")}" + ) + } + blocks.lastOption.foreach { lastBlock => + val expectedLastBlockHeight = nextFromHeight + blocks.length - 1 + if (lastBlock.blockHeight != expectedLastBlockHeight) { + logger.warn( + s"Last block height was expected to be $expectedLastBlockHeight but was ${lastBlock.blockHeight}. " + + "This might point to a gap in queried blocks (visible under debug logging) and cause the BlockSequencer subscription to become stuck." ) } - blocks.lastOption.foreach { lastBlock => - if (lastBlock.blockHeight != lastHeight + blocks.length) { - logger.warn( - s"Last block height was expected to be ${lastHeight + blocks.length} but was ${lastBlock.blockHeight}. " + - "This might point to a gap in queried blocks (visible under debug logging) and cause the BlockSequencer subscription to become stuck." - ) - } - } - blocks } - } else { - Future.successful(Seq.empty[BlockOrderer.Block]) + blocks } } yield { - // Setting the "new lastHeight" watermark block height based on the number of new blocks seen + // Setting the "new nextFromHeight" watermark block height based on the number of new blocks seen // assumes that store.queryBlocks returns consecutive blocks with "no gaps". See #13539. 
- (lastHeight + newBlocks.size) -> newBlocks + (nextFromHeight + newBlocks.size) -> newBlocks } } .mapConcat(_._2) @@ -167,6 +163,7 @@ object ReferenceBlockOrderer { storage: StorageConfigT, pollInterval: config.NonNegativeFiniteDuration = config.NonNegativeFiniteDuration.ofMillis(100), + logQueryCost: Option[QueryCostMonitoringConfig] = None, ) final case class TimestampedRequest(tag: String, body: ByteString, timestamp: CantonTimestamp) diff --git a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala index 21ca15b11..3ff4ca7c2 100644 --- a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala +++ b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/DbReferenceBlockOrderingStore.scala @@ -173,10 +173,10 @@ class DbReferenceBlockOrderingStore( ) } - override def countBlocks()(implicit + override def maxBlockHeight()(implicit traceContext: TraceContext - ): Future[Long] = - storage.query(sql"""select count(*) from blocks""".as[Long].head, "count blocks") + ): Future[Option[Long]] = + storage.query(sql"""select max(id) from blocks""".as[Option[Long]].head, "max block height") override def queryBlocks(initialHeight: Long)(implicit traceContext: TraceContext diff --git a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala index 6d78a1066..913008c98 100644 --- a/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala +++ b/community/drivers/reference/src/main/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStore.scala @@ -31,9 +31,9 @@ trait ReferenceBlockOrderingStore { traceContext: TraceContext ): Future[Unit] - def countBlocks()(implicit + def maxBlockHeight()(implicit traceContext: TraceContext - ): Future[Long] + ): Future[Option[Long]] def queryBlocks(initialHeight: Long)(implicit traceContext: TraceContext @@ -104,9 +104,9 @@ class InMemoryReferenceSequencerDriverStore extends ReferenceBlockOrderingStore Future.unit } - override def countBlocks()(implicit + override def maxBlockHeight()(implicit traceContext: TraceContext - ): Future[Long] = Future.successful(deque.size().toLong) + ): Future[Option[Long]] = Future.successful(Option.when(!deque.isEmpty)(deque.size().toLong - 1)) /** Query available blocks starting with the specified initial height. * The blocks need to be returned in consecutive block-height order i.e. contain no "gaps". 
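Note on the reworked subscription loop above: it polls with a `nextFromHeight` watermark and advances it by the number of blocks returned, which is only safe if `queryBlocks` yields consecutive heights with no gaps. The following is a minimal sketch of that watermark arithmetic and gap check, using a hypothetical `Block(blockHeight)` stand-in rather than the real `BlockOrderer.Block`:

    // Sketch only: `Block` stands in for BlockOrderer.Block; just the height matters here.
    final case class Block(blockHeight: Long)

    def advanceWatermark(nextFromHeight: Long, blocks: Seq[Block]): Long = {
      blocks.lastOption.foreach { lastBlock =>
        // The last returned height must equal nextFromHeight + blocks.length - 1;
        // otherwise the queried range had a gap and the subscription may get stuck.
        val expectedLastBlockHeight = nextFromHeight + blocks.length - 1
        if (lastBlock.blockHeight != expectedLastBlockHeight)
          println(s"expected last height $expectedLastBlockHeight but got ${lastBlock.blockHeight}")
      }
      // Advancing by size assumes consecutive blocks: e.g. starting from height 5 with
      // three blocks (5, 6, 7), the next poll starts at 8.
      nextFromHeight + blocks.size
    }
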
diff --git a/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala b/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala index 9a5901edf..b52f223c8 100644 --- a/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala +++ b/community/drivers/reference/src/test/scala/com/digitalasset/canton/domain/sequencing/sequencer/reference/store/ReferenceBlockOrderingStoreTest.scala @@ -49,11 +49,13 @@ trait ReferenceBlockOrderingStoreTest extends AsyncWordSpec with BaseTest { "increment counter when inserting block" in { val sut = mk() for { - count <- sut.countBlocks() - _ = count shouldBe 0 + count <- sut.maxBlockHeight() + _ = count shouldBe None _ <- sut.insertRequest(event1) - count2 <- sut.countBlocks() - } yield count2 shouldBe 1 + count2 <- sut.maxBlockHeight() + } yield { + count2 shouldBe Some(0) + } } } "queryBlocks" should { diff --git a/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityEnvironmentDefinition.scala b/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityEnvironmentDefinition.scala index 096968c28..10dba1924 100644 --- a/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityEnvironmentDefinition.scala +++ b/community/integration-testing/src/main/scala/com/digitalasset/canton/integration/CommunityEnvironmentDefinition.scala @@ -94,9 +94,6 @@ object CommunityEnvironmentDefinition { lazy val simpleTopology: CommunityEnvironmentDefinition = fromResource("examples/01-simple-topology/simple-topology.conf") - lazy val simpleTopologyX: CommunityEnvironmentDefinition = - fromResource("examples/01-simple-topology/simple-topology-x.conf") - def fromResource(path: String): CommunityEnvironmentDefinition = CommunityEnvironmentDefinition( baseConfig = loadConfigFromResource(path), diff --git a/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/Main.scala b/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/Main.scala index e93fbec8a..b164b2f9c 100644 --- a/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/Main.scala +++ b/community/ledger-service/http-json-perf/src/main/scala/com/daml/http/perf/Main.scala @@ -148,10 +148,9 @@ object Main extends StrictLogging { def runScenario(config: Config[String]) = resolveSimulationClass(config.scenario).flatMap { _ => - withLedger(config.dars) { (ledgerPort, _, ledgerId) => + withLedger(config.dars) { (ledgerPort, _, _) => QueryStoreBracket.withJsonApiJdbcConfig(config.queryStoreIndex) { jsonApiJdbcConfig => withHttpService( - ledgerId.unwrap, ledgerPort, jsonApiJdbcConfig, None, diff --git a/community/ledger-service/http-json/src/failurelib/scala/http/FailureTests.scala b/community/ledger-service/http-json/src/failurelib/scala/http/FailureTests.scala index 88c16eb38..5a3abfb52 100644 --- a/community/ledger-service/http-json/src/failurelib/scala/http/FailureTests.scala +++ b/community/ledger-service/http-json/src/failurelib/scala/http/FailureTests.scala @@ -47,7 +47,7 @@ abstract class FailureTests protected override final def testId = getClass.getSimpleName private def headersWithParties(actAs: List[domain.Party]) = - Future successful headersWithPartyAuth(actAs, List(), 
Some(ledgerId.unwrap)) + Future successful headersWithPartyAuth(actAs, List()) val availabilitySecurity: SecurityTest = SecurityTest(property = Availability, asset = "Ledger Service HTTP JSON") @@ -389,7 +389,7 @@ abstract class FailureTests ) _ = status shouldBe a[StatusCodes.Success] cid = getContractId(getResult(r)) - jwt <- jwtForParties(uri)(List(p), List(), ledgerId.unwrap) + jwt <- jwtForParties(uri)(List(p), List()) r <- (singleClientQueryStream( jwt, uri, @@ -411,7 +411,7 @@ abstract class FailureTests ) cid = getContractId(getResult(r)) _ = status shouldBe a[StatusCodes.Success] - jwt <- jwtForParties(uri)(List(p), List(), ledgerId.unwrap) + jwt <- jwtForParties(uri)(List(p), List()) (stop, source) = singleClientQueryStream( jwt, uri, diff --git a/community/ledger-service/http-json/src/failurelib/scala/http/HttpTestFixture.scala b/community/ledger-service/http-json/src/failurelib/scala/http/HttpTestFixture.scala index 27d335dbe..3f36b3e62 100644 --- a/community/ledger-service/http-json/src/failurelib/scala/http/HttpTestFixture.scala +++ b/community/ledger-service/http-json/src/failurelib/scala/http/HttpTestFixture.scala @@ -68,7 +68,6 @@ trait HttpFailureTestFixture extends ToxicSandboxFixture with PostgresAroundAll Some(jdbcConfig_), None, wsConfig = Some(WebsocketConfig()), - ledgerIdOverwrite = Some(ledgerId), ) } } diff --git a/community/ledger-service/http-json/src/failurelib/scala/http/ToxicSandboxFixture.scala b/community/ledger-service/http-json/src/failurelib/scala/http/ToxicSandboxFixture.scala index 8788a7983..d977f0698 100644 --- a/community/ledger-service/http-json/src/failurelib/scala/http/ToxicSandboxFixture.scala +++ b/community/ledger-service/http-json/src/failurelib/scala/http/ToxicSandboxFixture.scala @@ -5,7 +5,6 @@ package com.daml.http import com.daml.bazeltools.BazelRunfiles import com.daml.integrationtest.CantonFixtureWithResource -import com.daml.ledger.api.domain.LedgerId import com.daml.ledger.resources.{Resource, ResourceContext, ResourceOwner} import com.daml.ports.{LockedFreePort, Port} import com.daml.timer.RetryStrategy @@ -39,8 +38,6 @@ trait ToxicSandboxFixture override protected def beforeEach() = proxyClient.reset() - protected def ledgerId: LedgerId = LedgerId(config.ledgerIds.headOption.value) - protected def makeToxiproxyResource(ledger: Port): ResourceOwner[(Port, ToxiproxyClient, Proxy)] = new ResourceOwner[(Port, ToxiproxyClient, Proxy)] { val host = InetAddress.getLoopbackAddress diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthorizationError.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthorizationError.scala index 8363dcc3f..32bd44657 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthorizationError.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/AuthorizationError.scala @@ -19,11 +19,6 @@ object AuthorizationError { s"Claims were valid until $authorizedUntil, current time is $currentTime" } - final case class InvalidLedger(authorized: String, actual: String) extends AuthorizationError { - override val reason = - s"Claims are only valid for ledgerId '$authorized', actual ledgerId is '$actual'" - } - final case class InvalidParticipant(authorized: String, actual: String) extends AuthorizationError { override val reason = diff --git 
a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Authorizer.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Authorizer.scala index 6f04d9e84..f7fe17f95 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Authorizer.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Authorizer.scala @@ -30,7 +30,6 @@ import scala.util.{Failure, Success, Try} */ final class Authorizer( now: () => Instant, - ledgerId: String, participantId: String, userManagementStore: UserManagementStore, ec: ExecutionContext, @@ -53,7 +52,6 @@ final class Authorizer( jwtTimestampLeeway, None, ) // Don't use the grace period for the initial check - _ <- claims.validForLedger(ledgerId) _ <- claims.validForParticipant(participantId) } yield { () diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Claims.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Claims.scala index aa5af2e8a..c82b12be8 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Claims.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/Claims.scala @@ -29,7 +29,7 @@ case object ClaimIdentityProviderAdmin extends Claim /** Authorized to use all "public" services, i.e., * those that do not require admin rights and do not depend on any Daml party. - * Examples include the LedgerIdentityService or the PackageService. + * Examples include the VersionService or the PackageService. */ case object ClaimPublic extends Claim @@ -67,7 +67,6 @@ object ClaimSet { * Please use that file when writing or reviewing tests; and keep it up to date when adding new endpoints. * * @param claims List of [[Claim]]s describing the authorization this object describes. - * @param ledgerId If set, the claims will only be valid on the given ledger identifier. * @param participantId If set, the claims will only be valid on the given participant identifier. * @param applicationId If set, the claims will only be valid on the given application identifier. * @param expiration If set, the claims will cease to be valid at the given time. 
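For context on the Authorizer and Claims changes above: with the ledger-id claim removed, the initial authorization check reduces to expiration and participant validation. The sketch below illustrates that Either-based chain with simplified stand-ins (not the real ClaimSet API); the error messages mirror the AuthorizationError reasons shown earlier in this patch.

    import java.time.Instant

    // Simplified stand-in for the expiration check.
    def notExpired(expiration: Option[Instant], now: Instant): Either[String, Unit] =
      expiration match {
        case Some(e) if !now.isBefore(e) =>
          Left(s"Claims were valid until $e, current time is $now")
        case _ => Right(())
      }

    // Simplified stand-in for the participant check.
    def validForParticipant(claimed: Option[String], actual: String): Either[String, Unit] =
      claimed match {
        case Some(p) if p != actual =>
          Left(s"Claims are only valid for participantId '$p', actual participantId is '$actual'")
        case _ => Right(())
      }

    // The validForLedger step that used to sit in this chain is simply gone.
    def preCheck(
        expiration: Option[Instant],
        claimedParticipant: Option[String],
        participantId: String,
        now: Instant,
    ): Either[String, Unit] =
      for {
        _ <- notExpired(expiration, now)
        _ <- validForParticipant(claimedParticipant, participantId)
      } yield ()
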
@@ -77,7 +76,6 @@ object ClaimSet { */ final case class Claims( claims: Seq[Claim], - ledgerId: Option[String], participantId: Option[String], applicationId: Option[String], expiration: Option[Instant], @@ -85,12 +83,6 @@ object ClaimSet { resolvedFromUser: Boolean, ) extends ClaimSet { - def validForLedger(id: String): Either[AuthorizationError, Unit] = - ledgerId match { - case Some(l) if l != id => Left(AuthorizationError.InvalidLedger(l, id)) - case _ => Right(()) - } - def validForParticipant(id: String): Either[AuthorizationError, Unit] = participantId match { case Some(p) if p != id => Left(AuthorizationError.InvalidParticipant(p, id)) @@ -181,7 +173,6 @@ object ClaimSet { /** A set of [[Claims]] that does not have any authorization */ val Empty: Claims = Claims( claims = List.empty[Claim], - ledgerId = None, participantId = None, applicationId = None, expiration = None, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/AuthorizationInterceptor.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/AuthorizationInterceptor.scala index fee25bcf0..2a2eaf579 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/AuthorizationInterceptor.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/auth/interceptor/AuthorizationInterceptor.scala @@ -127,7 +127,6 @@ final case class AuthorizationInterceptor( Future.successful( ClaimSet.Claims( claims = AuthorizationInterceptor.convertUserRightsToClaims(userRights), - ledgerId = None, participantId = participantId, applicationId = Some(userId), expiration = expiration, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/completion/CompletionStreamRequest.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/completion/CompletionStreamRequest.scala index df39bec96..2307c6680 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/completion/CompletionStreamRequest.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/messages/command/completion/CompletionStreamRequest.scala @@ -4,10 +4,9 @@ package com.digitalasset.canton.ledger.api.messages.command.completion import com.daml.lf.data.Ref -import com.digitalasset.canton.ledger.api.domain.{LedgerId, ParticipantOffset} +import com.digitalasset.canton.ledger.api.domain.ParticipantOffset final case class CompletionStreamRequest( - ledgerId: Option[LedgerId], applicationId: Ref.ApplicationId, parties: Set[Ref.Party], offset: Option[ParticipantOffset], diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidator.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidator.scala index ba496d18a..ba40ed041 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidator.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidator.scala @@ -31,7 +31,6 @@ class CompletionServiceRequestValidator( "offset", ) } yield CompletionStreamRequest( - None, appId, parties, convertedOffset, diff 
--git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala index 4f5a708a5..3676b6a0a 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/api/validation/FieldValidator.scala @@ -9,12 +9,7 @@ import com.daml.lf.data.Ref.{PackageRef, Party, TypeConRef} import com.daml.lf.data.{Ref, Time} import com.daml.lf.value.Value.ContractId import com.digitalasset.canton.ledger.api.domain -import com.digitalasset.canton.ledger.api.domain.{ - IdentityProviderId, - JwksUrl, - LedgerId, - TemplateFilter, -} +import com.digitalasset.canton.ledger.api.domain.{IdentityProviderId, JwksUrl, TemplateFilter} import com.digitalasset.canton.ledger.api.util.TimestampConversion import com.digitalasset.canton.ledger.api.validation.ResourceAnnotationValidator.{ AnnotationsSizeExceededError, @@ -23,7 +18,6 @@ import com.digitalasset.canton.ledger.api.validation.ResourceAnnotationValidator } import com.digitalasset.canton.ledger.api.validation.ValidationErrors.* import com.digitalasset.canton.ledger.api.validation.ValueValidator.* -import com.digitalasset.canton.ledger.error.groups.RequestValidationErrors import com.digitalasset.canton.topology.DomainId import com.google.protobuf.timestamp.Timestamp import io.grpc.StatusRuntimeException @@ -31,21 +25,6 @@ import io.grpc.StatusRuntimeException import scala.util.{Failure, Success, Try} object FieldValidator { - def matchLedgerId( - ledgerId: LedgerId - )(receivedO: Option[LedgerId])(implicit - contextualizedErrorLogger: ContextualizedErrorLogger - ): Either[StatusRuntimeException, Option[LedgerId]] = receivedO match { - case None => Right(None) - case Some(`ledgerId`) => Right(Some(ledgerId)) - case Some(mismatching) => - import scalaz.syntax.tag.* - Left( - RequestValidationErrors.LedgerIdMismatch - .Reject(ledgerId.unwrap, mismatching.unwrap) - .asGrpcError - ) - } def requireNonEmptyString(s: String, fieldName: String)(implicit contextualizedErrorLogger: ContextualizedErrorLogger diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala index 51ee411df..3bedc9ce7 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriter.scala @@ -5,10 +5,9 @@ package com.digitalasset.canton.ledger.runner.common import com.daml.jwt.JwtTimestampLeeway import com.daml.lf.data.Ref -import com.daml.metrics.api.reporters.MetricsReporter import com.daml.ports.Port import com.digitalasset.canton.ledger.api.tls.TlsVersion.TlsVersion -import com.digitalasset.canton.ledger.api.tls.{SecretsUrl, TlsConfiguration, TlsVersion} +import com.digitalasset.canton.ledger.api.tls.{TlsConfiguration, TlsVersion} import com.digitalasset.canton.ledger.runner.common.OptConfigValue.{ optConvertEnabled, optProductHint, @@ -38,7 +37,7 @@ import pureconfig.configurable.{genericMapReader, genericMapWriter} import pureconfig.error.CannotConvert import pureconfig.generic.ProductHint import 
pureconfig.generic.semiauto.* -import pureconfig.{ConfigConvert, ConfigReader, ConfigWriter, ConvertHelpers} +import pureconfig.{ConfigConvert, ConfigReader, ConfigWriter} import scala.annotation.nowarn import scala.concurrent.duration.{Duration, FiniteDuration} @@ -63,32 +62,6 @@ class PureConfigReaderWriter(secure: Boolean = true) { .toRight(CannotConvert(str, Duration.getClass.getName, s"Could not convert $str")) } - implicit val metricReporterReader: ConfigReader[MetricsReporter] = { - ConfigReader.fromString[MetricsReporter](ConvertHelpers.catchReadError { s => - MetricsReporter.parseMetricsReporter(s) - }) - } - implicit val metricReporterWriter: ConfigWriter[MetricsReporter] = - ConfigWriter.toString { - case MetricsReporter.Console => "console" - case MetricsReporter.Csv(directory) => s"csv://${directory.toAbsolutePath.toString}" - case MetricsReporter.Graphite(address, prefix) => - s"graphite://${address.getHostName}:${address.getPort}/${prefix.getOrElse("")}" - case MetricsReporter.Prometheus(address) => - s"prometheus://${address.getHostName}:${address.getPort}" - } - - implicit val secretsUrlReader: ConfigReader[SecretsUrl] = - ConfigReader.fromString[SecretsUrl] { url => - Right(SecretsUrl.fromString(url)) - } - - implicit val secretsUrlWriter: ConfigWriter[SecretsUrl] = - ConfigWriter.toString { - case SecretsUrl.FromUrl(url) if !secure => url.toString - case _ => ReplaceSecretWithString - } - implicit val clientAuthReader: ConfigReader[ClientAuth] = ConfigReader.fromStringTry[ClientAuth](value => Try(ClientAuth.valueOf(value.toUpperCase))) implicit val clientAuthWriter: ConfigWriter[ClientAuth] = diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/DispatcherState.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/DispatcherState.scala index 187a177a1..da55ec0d1 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/DispatcherState.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/DispatcherState.scala @@ -67,15 +67,21 @@ class DispatcherState( } }) - def stopDispatcher(): Future[Unit] = blocking(synchronized { - - dispatcherStateRef match { - case DispatcherNotRunning | DispatcherStateShutdown => - logger.debug(s"$ServiceName already stopped, shutdown or never started.") - Future.unit - case DispatcherRunning(dispatcher) => - logger.info(s"Stopping active $ServiceName.") - dispatcherStateRef = DispatcherNotRunning + def stopDispatcher(): Future[Unit] = { + val dispatcherToCancel = blocking(synchronized { + dispatcherStateRef match { + case DispatcherNotRunning | DispatcherStateShutdown => + logger.debug(s"$ServiceName already stopped, shutdown or never started.") + None + case DispatcherRunning(dispatcher) => + logger.info(s"Stopping active $ServiceName.") + dispatcherStateRef = DispatcherNotRunning + Some(dispatcher) + } + }) + dispatcherToCancel match { + case None => Future.unit + case Some(dispatcher) => dispatcher .cancel(() => dispatcherNotRunning) .transform { @@ -87,14 +93,15 @@ class DispatcherState( f }(directEc) } - }) + } - private[platform] def shutdown(): Future[Unit] = blocking(synchronized { + private[platform] def shutdown(): Future[Unit] = { logger.info(s"Shutting down $ServiceName state.") - - val currentDispatcherState = dispatcherStateRef - dispatcherStateRef = DispatcherStateShutdown - + val currentDispatcherState = blocking(synchronized { + val currentDispatcherState = dispatcherStateRef + 
dispatcherStateRef = DispatcherStateShutdown + currentDispatcherState + }) currentDispatcherState match { case DispatcherNotRunning => logger.info(s"$ServiceName not running. Transitioned to shutdown.") @@ -119,7 +126,7 @@ class DispatcherState( f }(directEc) } - }) + } private def buildDispatcher(initializationOffset: Offset): Dispatcher[Offset] = Dispatcher( diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala index 6590c1694..544776384 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/ApiServiceOwner.scala @@ -3,7 +3,6 @@ package com.digitalasset.canton.platform.apiserver -import com.daml.buildinfo.BuildInfo import com.daml.jwt.JwtTimestampLeeway import com.daml.ledger.resources.ResourceOwner import com.daml.lf.data.Ref @@ -17,7 +16,6 @@ import com.digitalasset.canton.ledger.api.domain import com.digitalasset.canton.ledger.api.health.HealthChecks import com.digitalasset.canton.ledger.api.tls.TlsConfiguration import com.digitalasset.canton.ledger.api.util.TimeProvider -import com.digitalasset.canton.ledger.configuration.LedgerId import com.digitalasset.canton.ledger.localstore.api.{ IdentityProviderConfigStore, PartyRecordStore, @@ -75,7 +73,6 @@ object ApiServiceOwner { upgradingEnabled: Boolean, disableUpgradeValidation: Boolean, // immutable configuration parameters - ledgerId: LedgerId, participantId: Ref.ParticipantId, meteringReportKey: MeteringReportKey = CommunityKey, // objects @@ -114,7 +111,6 @@ object ApiServiceOwner { val authorizer = new Authorizer( Clock.systemUTC.instant _, - ledgerId, participantId, userManagementStore, servicesExecutionContext, @@ -130,7 +126,7 @@ object ApiServiceOwner { val healthChecksWithIndexService = healthChecks + ("index" -> indexService) val identityProviderConfigLoader = new IdentityProviderConfigLoader { - override def getIdentityProviderConfig(issuer: LedgerId)(implicit + override def getIdentityProviderConfig(issuer: String)(implicit loggingContext: LoggingContextWithTrace ): Future[domain.IdentityProviderConfig] = identityProviderConfigStore.getActiveIdentityProviderByIssuer(issuer)( @@ -207,7 +203,8 @@ object ApiServiceOwner { loggerFactory .getTracedLogger(getClass) .info( - s"Initialized API server version ${BuildInfo.Version} with ledger-id = $ledgerId, port = ${apiService.port}." + s"Initialized API server listening to port = ${apiService.port} ${if (tls.isDefined) "using tls" + else "without tls"}." 
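Note: the DispatcherState.stopDispatcher/shutdown rework above applies a pattern this patch repeats later (in PreemptableSequence and QueueBasedConcurrencyLimiter): hold the lock only long enough to read and update the state reference, then complete the asynchronous work outside the synchronized block. The following is a minimal sketch of that pattern only; the names (StoppableResource, Running, cancel) are illustrative and not the actual Canton types.

import scala.concurrent.{Future, blocking}

// Sketch: the lock guards only the state transition; the Future returned by
// `cancel` is chained outside the synchronized block.
final class StoppableResource {
  private sealed trait State
  private case object NotRunning extends State
  private final case class Running(cancel: () => Future[Unit]) extends State

  private var state: State = NotRunning

  def stop(): Future[Unit] = {
    val toCancel = blocking(synchronized {
      state match {
        case NotRunning => None
        case Running(cancel) =>
          state = NotRunning
          Some(cancel)
      }
    })
    toCancel.fold(Future.unit)(cancel => cancel())
  }
}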
) apiService } diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala index 2789ffc8f..a1d892c57 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/apiserver/services/logging/package.scala @@ -13,7 +13,6 @@ import com.daml.logging.entries.{LoggingEntries, LoggingEntry, LoggingValue} import com.digitalasset.canton.ledger.api.domain.{ Commands, EventId, - LedgerId, ParticipantOffset, TransactionFilter, TransactionId, @@ -61,9 +60,6 @@ package object logging { private[services] def offset(offset: String): LoggingEntry = "offset" -> offset - private[services] def ledgerId(id: Option[LedgerId]): LoggingEntry = - "ledgerId" -> OfString(id.map(_.unwrap).getOrElse("")) - private[services] def commandId(id: String): LoggingEntry = "commandId" -> id diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala index 44ea9949b..fb62cde7d 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceImpl.scala @@ -25,7 +25,6 @@ import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.ledger.api.domain.{ Filters, InclusiveFilters, - LedgerId, PackageEntry, ParticipantOffset, TransactionFilter, @@ -71,7 +70,6 @@ import scala.concurrent.Future import scala.util.Success private[index] class IndexServiceImpl( - val ledgerId: LedgerId, participantId: Ref.ParticipantId, ledgerDao: LedgerReadDao, transactionsReader: LedgerDaoTransactionsReader, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala index 05e5e4189..7cd849f55 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/index/IndexServiceOwner.scala @@ -10,7 +10,6 @@ import com.daml.lf.engine.Engine import com.daml.resources.ProgramResource.StartupException import com.daml.timer.RetryStrategy import com.digitalasset.canton.ledger.api.domain -import com.digitalasset.canton.ledger.api.domain.LedgerId import com.digitalasset.canton.ledger.error.IndexErrors.IndexDbException import com.digitalasset.canton.ledger.offset.Offset import com.digitalasset.canton.ledger.participant.state.index.v2.IndexService @@ -44,7 +43,6 @@ import scala.util.control.NoStackTrace final class IndexServiceOwner( config: IndexServiceConfig, dbSupport: DbSupport, - ledgerId: LedgerId, servicesExecutionContext: ExecutionContext, metrics: Metrics, engine: Engine, @@ -109,7 +107,6 @@ final class IndexServiceOwner( )(inMemoryFanOutExecutionContext) indexService = new IndexServiceImpl( - ledgerId = ledgerId, participantId = participantId, ledgerDao = ledgerDao, transactionsReader = bufferedTransactionsReader, diff 
--git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/HaCoordinator.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/HaCoordinator.scala index 9cb90516e..28a2432fd 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/HaCoordinator.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/HaCoordinator.scala @@ -101,8 +101,17 @@ object HaCoordinator { override def protectedExecution( initializeExecution: ConnectionInitializer => Future[Handle] ): Handle = { - def acquireLock(connection: Connection, lockId: LockId, lockMode: LockMode): Lock = { - logger.debug(s"Acquiring lock $lockId $lockMode") + def acquireLock( + connection: Connection, + lockId: LockId, + lockMode: LockMode, + fromHealthCheck: Boolean = false, + ): Lock = { + // Reduce log level if we check the lock acquisition from the health check + if (fromHealthCheck) + logger.trace(s"Acquiring lock $lockId $lockMode") + else + logger.debug(s"Acquiring lock $lockId $lockMode") storageBackend .tryAcquire(lockId, lockMode)(connection) .getOrElse( @@ -110,8 +119,8 @@ object HaCoordinator { ) } - def acquireMainLock(connection: Connection): Unit = - acquireLock(connection, indexerLockId, LockMode.Exclusive).discard + def acquireMainLock(connection: Connection, fromHealthCheck: Boolean = false): Unit = + acquireLock(connection, indexerLockId, LockMode.Exclusive, fromHealthCheck).discard preemptableSequence.executeSequence { sequenceHelper => import sequenceHelper.* @@ -139,7 +148,13 @@ object HaCoordinator { waitMillisBetweenRetries = haConfig.workerLockAcquireRetryTimeout.duration.toMillis, maxAmountOfRetries = haConfig.workerLockAcquireMaxRetries.unwrap, retryable = _.isInstanceOf[CannotAcquireLockException], - )(acquireLock(mainConnection, indexerWorkerLockId, LockMode.Exclusive)) + )( + acquireLock( + mainConnection, + indexerWorkerLockId, + LockMode.Exclusive, + ) + ) _ = logger.info( "Previous IndexDB HA Coordinator finished work, starting DB connectivity polling" ) @@ -153,7 +168,7 @@ object HaCoordinator { mainLockChecker <- go[PollingChecker]( new PollingChecker( periodMillis = haConfig.mainLockCheckerPeriod.duration.toMillis, - checkBody = acquireMainLock(mainConnection), + checkBody = acquireMainLock(mainConnection, fromHealthCheck = true), killSwitch = handle.killSwitch, // meaning: this PollingChecker will shut down the main preemptableSequence loggerFactory = loggerFactory, diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PollingChecker.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PollingChecker.scala index b9a9022fb..a3a247fab 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PollingChecker.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PollingChecker.scala @@ -63,7 +63,7 @@ class PollingChecker( }) private def scheduledCheck(): Unit = blocking(synchronized { - logger.debug(s"Scheduled checking...") + logger.trace(s"Scheduled checking...") // Timer can fire at most one additional TimerTask after being cancelled. This is to safeguard that corner case. 
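Note: the HaCoordinator and PollingChecker hunks above both downgrade routine, periodic lock-check logging from debug to trace while keeping failures at a higher level. The helper below is an illustrative sketch of the same idea only, written against plain SLF4J rather than Canton's TracedLogger.

import org.slf4j.LoggerFactory
import scala.util.{Failure, Success, Try}

object PeriodicCheck {
  private val logger = LoggerFactory.getLogger(getClass)

  // Routine outcomes are logged at trace; only failures surface at info.
  def run[A](description: String)(body: => A): Try[A] = {
    logger.trace(s"Running periodic check: $description")
    Try(body) match {
      case ok @ Success(_) =>
        logger.trace(s"Periodic check succeeded: $description")
        ok
      case failed @ Failure(ex) =>
        logger.info(s"Periodic check failed: $description (${ex.getMessage})")
        failed
    }
  }
}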
if (!closed) { checkInternal() @@ -73,7 +73,7 @@ class PollingChecker( private def checkInternal(): Unit = blocking(synchronized { Try(checkBody) match { case Success(_) => - logger.debug(s"Check successful.") + logger.trace(s"Check successful.") case Failure(ex) => logger.info(s"Check failed (${ex.getMessage}). Calling KillSwitch/abort.") diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PreemptableSequence.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PreemptableSequence.scala index d84f5543f..5fe20b1d9 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PreemptableSequence.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/indexer/ha/PreemptableSequence.scala @@ -191,14 +191,19 @@ object PreemptableSequence { override def handle: Handle = resultHandle } - def release: Future[Unit] = blocking(synchronized { - releaseStack match { - case Nil => Future.unit - case x :: xs => - releaseStack = xs - x().transformWith(_ => release) + def release: Future[Unit] = { + blocking(synchronized { + releaseStack match { + case Nil => None + case x :: xs => + releaseStack = xs + Some((x, xs)) + } + }) match { + case None => Future.unit + case Some((x, xs)) => x().transformWith(_ => release) } - }) + } sequence(helper).transformWith(fResult => release.transform(_ => fResult)).onComplete { case Success(_) => diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala index 70572445e..c598dce8e 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/cache/StateCache.scala @@ -95,6 +95,7 @@ private[platform] case class StateCache[K, V]( * @param key the key at which to update the cache * @param fetchAsync fetches asynchronously the value for key `key` at the current cache index */ + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) def putAsync(key: K, fetchAsync: Offset => Future[V])(implicit traceContext: TraceContext ): Future[V] = Timed.value( diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala index e40d0cbf2..858d4ee5e 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDao.scala @@ -10,7 +10,7 @@ import com.daml.lf.data.Time.Timestamp import com.daml.lf.engine.Engine import com.daml.lf.transaction.{BlindingInfo, CommittedTransaction} import com.daml.logging.entries.LoggingEntry -import com.digitalasset.canton.ledger.api.domain.{LedgerId, ParticipantId} +import com.digitalasset.canton.ledger.api.domain.ParticipantId import com.digitalasset.canton.ledger.api.health.{HealthStatus, ReportsHealth} import com.digitalasset.canton.ledger.offset.Offset import com.digitalasset.canton.ledger.participant.state.index.v2.MeteringStore.ReportData @@ -95,8 +95,7 @@ private class JdbcLedgerDao( ) override def 
initialize( - ledgerId: LedgerId, - participantId: ParticipantId, + participantId: ParticipantId )(implicit loggingContext: LoggingContextWithTrace): Future[Unit] = dbDispatcher .executeSql(metrics.index.db.initializeLedgerParameters)( diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala index 9b66235ee..e223506bb 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/dao/LedgerDao.scala @@ -16,7 +16,7 @@ import com.daml.ledger.api.v2.update_service.{ import com.daml.lf.data.Ref import com.daml.lf.data.Time.Timestamp import com.daml.lf.transaction.{BlindingInfo, CommittedTransaction} -import com.digitalasset.canton.ledger.api.domain.{LedgerId, ParticipantId} +import com.digitalasset.canton.ledger.api.domain.ParticipantId import com.digitalasset.canton.ledger.api.health.ReportsHealth import com.digitalasset.canton.ledger.offset.Offset import com.digitalasset.canton.ledger.participant.state.index.v2.MeteringStore.ReportData @@ -191,12 +191,10 @@ private[platform] trait LedgerWriteDao extends ReportsHealth { * * This method must succeed at least once before other LedgerWriteDao methods may be used. * - * @param ledgerId the ledger id to be stored * @param participantId the participant id to be stored */ def initialize( - ledgerId: LedgerId, - participantId: ParticipantId, + participantId: ParticipantId )(implicit loggingContext: LoggingContextWithTrace): Future[Unit] def storeRejection( diff --git a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiter.scala b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiter.scala index 807932e76..a4f568065 100644 --- a/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiter.scala +++ b/community/ledger/ledger-api-core/src/main/scala/com/digitalasset/canton/platform/store/utils/ConcurrencyLimiter.scala @@ -23,9 +23,8 @@ class QueueBasedConcurrencyLimiter( private val waiting = mutable.Queue[Task]() private var running: Int = 0 - override def execute[T](task: => Future[T]): Future[T] = blocking(synchronized { + override def execute[T](task: => Future[T]): Future[T] = { val promise = Promise[T]() - val waitingTask = () => { task.andThen { case result => blocking(synchronized { @@ -36,18 +35,20 @@ class QueueBasedConcurrencyLimiter( }(executionContext).discard } - waiting.enqueue(waitingTask) - startTasks() + blocking(synchronized { + waiting.enqueue(waitingTask) + startTasks() + }) promise.future - }) + } @SuppressWarnings(Array("org.wartremover.warts.While")) - private def startTasks(): Unit = blocking(synchronized { + private def startTasks(): Unit = + // No need to put this into a synchronized block because all call sites are inside synchronized blocks while (running < parallelism && waiting.nonEmpty) { val head = waiting.dequeue() running = running + 1 head() } - }) } diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthorizerSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthorizerSpec.scala index a6633cea7..b8fc57a3a 100644 --- 
a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthorizerSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/AuthorizerSpec.scala @@ -77,7 +77,6 @@ class AuthorizerSpec private def authorizer() = new Authorizer( () => Instant.ofEpochSecond(1337L), - "some-ledger-id", "participant-id", mock[UserManagementStore], mock[ExecutionContext], diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala index 7716a0fcb..485b18360 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/auth/StreamAuthorizationComponentSpec.scala @@ -183,13 +183,11 @@ class StreamAuthorizationComponentSpec val partyId1 = Ref.Party.assertFromString("party1") private def test(body: Fixture => Future[Any]): Future[Assertion] = { - val ledgerId = "ledger-id" val participantId = "participant-id" val nowRef = new AtomicReference(Instant.now()) val partyId2 = Ref.Party.assertFromString("party2") val claimSetFixture = ClaimSet.Claims( claims = List[Claim](ClaimPublic, ClaimReadAsParty(partyId1), ClaimReadAsParty(partyId2)), - ledgerId = Some(ledgerId), participantId = Some(participantId), applicationId = Some(userId), expiration = Some(nowRef.get().plusSeconds(10)), @@ -224,7 +222,6 @@ class StreamAuthorizationComponentSpec .isRight shouldBe true val authorizer = new Authorizer( now = () => nowRef.get(), - ledgerId = ledgerId, participantId = participantId, userManagementStore = userManagementStore, ec = ec, diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidatorTest.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidatorTest.scala index 79bc9c6bf..ebaab2ddc 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidatorTest.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/CompletionServiceRequestValidatorTest.scala @@ -25,7 +25,6 @@ class CompletionServiceRequestValidatorTest Some(ParticipantOffset(ParticipantOffset.Value.Absolute(absoluteOffset))), ) private val completionReq = CompletionStreamRequest( - None, Ref.ApplicationId.assertFromString(expectedApplicationId), List(party).toSet, Some(domain.ParticipantOffset.Absolute(Ref.LedgerString.assertFromString(absoluteOffset))), @@ -43,7 +42,7 @@ class CompletionServiceRequestValidatorTest inside( validator.validateGrpcCompletionStreamRequest(grpcCompletionReq) ) { case Right(req) => - req shouldBe completionReq.copy(ledgerId = None) + req shouldBe completionReq } } @@ -90,7 +89,7 @@ class CompletionServiceRequestValidatorTest inside( validator.validateCompletionStreamRequest(completionReq, ledgerEnd) ) { case Right(req) => - req shouldBe completionReq.copy(ledgerId = None) + req shouldBe completionReq } } diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala 
b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala index af0988cc4..bdcf4ecdb 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/api/validation/SubmitRequestValidatorTest.scala @@ -222,15 +222,6 @@ class SubmitRequestValidatorTest ) } - "allow missing ledgerId" in { - testedCommandValidator.validateCommands( - api.commands, - internal.ledgerTime, - internal.submittedAt, - internal.maxDeduplicationDuration, - ) shouldEqual Right(internal.emptyCommands) - } - "tolerate a missing workflowId" in { testedCommandValidator.validateCommands( api.commands.withWorkflowId(""), diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala index 8548b0758..a94173b2d 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/ArbitraryConfig.scala @@ -8,7 +8,6 @@ import com.daml.lf.VersionRange import com.daml.lf.interpretation.Limits import com.daml.lf.language.LanguageVersion import com.daml.lf.transaction.ContractKeyUniquenessMode -import com.daml.metrics.api.reporters.MetricsReporter import com.digitalasset.canton.config.NonNegativeFiniteDuration import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, Port} import com.digitalasset.canton.ledger.api.tls.{TlsConfiguration, TlsVersion} @@ -24,7 +23,6 @@ import org.scalacheck.Gen import java.io.File import java.net.InetSocketAddress -import java.nio.file.Paths import java.time.Duration import java.time.temporal.ChronoUnit @@ -78,23 +76,6 @@ object ArbitraryConfig { port <- Gen.chooseNum(1, 65535) } yield new InetSocketAddress(host, port) - val graphiteReporter: Gen[MetricsReporter] = for { - address <- inetSocketAddress - prefixStr <- Gen.alphaStr if prefixStr.nonEmpty - prefix <- Gen.option(prefixStr) - } yield MetricsReporter.Graphite(address, prefix) - - val prometheusReporter: Gen[MetricsReporter] = for { - address <- inetSocketAddress - } yield MetricsReporter.Prometheus(address) - - val csvReporter: Gen[MetricsReporter] = for { - path <- Gen.alphaStr - } yield MetricsReporter.Csv(Paths.get(path).toAbsolutePath) - - val metricsReporter: Gen[MetricsReporter] = - Gen.oneOf(graphiteReporter, prometheusReporter, csvReporter, Gen.const(MetricsReporter.Console)) - val clientAuth = Gen.oneOf(ClientAuth.values().toList) val tlsVersion = Gen.oneOf(TlsVersion.allVersions) @@ -112,7 +93,6 @@ object ArbitraryConfig { keyCertChainFile.map(fileName => new File(fileName)), keyFile.map(fileName => new File(fileName)), trustCertCollectionFile.map(fileName => new File(fileName)), - None, clientAuth, enableCertRevocationChecking, minimumServerProtocolVersion, diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala index afc2a3084..16ee1d905 100644 --- 
a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/ledger/runner/common/PureConfigReaderWriterSpec.scala @@ -4,9 +4,7 @@ package com.digitalasset.canton.ledger.runner.common import com.daml.jwt.JwtTimestampLeeway -import com.daml.metrics.api.reporters.MetricsReporter -import com.digitalasset.canton.ledger.api.tls.{SecretsUrl, TlsConfiguration, TlsVersion} -import com.digitalasset.canton.ledger.runner.common +import com.digitalasset.canton.ledger.api.tls.{TlsConfiguration, TlsVersion} import com.digitalasset.canton.ledger.runner.common.OptConfigValue.{ optReaderEnabled, optWriterEnabled, @@ -33,8 +31,6 @@ import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks import pureconfig.error.ConfigReaderFailures import pureconfig.{ConfigConvert, ConfigReader, ConfigSource, ConfigWriter} -import java.net.InetSocketAddress -import java.nio.file.Path import java.time.Duration import scala.annotation.nowarn import scala.reflect.{ClassTag, classTag} @@ -75,7 +71,6 @@ class PureConfigReaderWriterSpec val readerWriter = new PureConfigReaderWriter(secure) import readerWriter.* testReaderWriterIsomorphism(secure, ArbitraryConfig.duration) - testReaderWriterIsomorphism(secure, ArbitraryConfig.metricsReporter) testReaderWriterIsomorphism(secure, Gen.oneOf(TlsVersion.allVersions)) testReaderWriterIsomorphism(secure, ArbitraryConfig.tlsConfiguration) testReaderWriterIsomorphism(secure, ArbitraryConfig.port) @@ -207,46 +202,6 @@ class PureConfigReaderWriterSpec .prettyPrint(0) should include("Unknown key") } - behavior of "MetricsReporter" - - it should "read/write against predefined values" in { - def compare( - reporter: MetricsReporter, - expectedString: String, - ): Assertion = { - metricReporterWriter.to(reporter) shouldBe fromAnyRef(expectedString) - metricReporterReader.from(fromAnyRef(expectedString)).value shouldBe reporter - } - compare( - MetricsReporter.Prometheus(new InetSocketAddress("localhost", 1234)), - "prometheus://localhost:1234", - ) - compare( - MetricsReporter.Graphite(new InetSocketAddress("localhost", 1234)), - "graphite://localhost:1234/", - ) - compare( - MetricsReporter.Graphite(new InetSocketAddress("localhost", 1234), Some("test")), - "graphite://localhost:1234/test", - ) - val path = Path.of("test").toAbsolutePath - compare( - MetricsReporter.Csv(path), - "csv://" + path.toString, - ) - compare(MetricsReporter.Console, "console") - } - - behavior of "SecretsUrl" - - it should "read/write against predefined values" in { - val secretUrl = "https://www.daml.com/secrets.json" - secretsUrlReader.from(fromAnyRef(secretUrl)).value shouldBe SecretsUrl.fromString(secretUrl) - secretsUrlWriter.to(SecretsUrl.fromString(secretUrl)) shouldBe fromAnyRef("") - new common.PureConfigReaderWriter(false).secretsUrlWriter - .to(SecretsUrl.fromString(secretUrl)) shouldBe fromAnyRef(secretUrl) - } - behavior of "Seeding" it should "read/write against predefined values" in { diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala index 1874b04ef..9fb8cfbdc 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala +++ 
b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/IndexComponentTest.scala @@ -10,7 +10,6 @@ import com.daml.lf.engine.{Engine, EngineConfig} import com.daml.lf.language.{LanguageMajorVersion, LanguageVersion} import com.daml.lf.transaction.test.{NodeIdTransactionBuilder, TestNodeBuilder} import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.ledger.api.domain.LedgerId import com.digitalasset.canton.ledger.api.health.HealthStatus import com.digitalasset.canton.ledger.offset.Offset import com.digitalasset.canton.ledger.participant.state.index.v2.IndexService @@ -144,7 +143,6 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest { ) indexService <- new IndexServiceOwner( dbSupport = dbSupport, - ledgerId = LedgerId(IndexComponentTest.TestLedgerId), config = IndexServiceConfig(), participantId = Ref.ParticipantId.assertFromString(IndexComponentTest.TestParticipantId), metrics = Metrics.ForTesting, @@ -184,7 +182,6 @@ trait IndexComponentTest extends PekkoBeforeAndAfterAll with BaseTest { object IndexComponentTest { - val TestLedgerId = "index-component-test-ledger-id" val TestParticipantId = "index-component-test-participant-id" val maxUpdateCount = 1000000 diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala index bbb1e709c..d50491e36 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/services/command/CommandServiceImplSpec.scala @@ -15,7 +15,6 @@ import com.daml.ledger.api.v2.value.{Identifier, Record, RecordField, Value} import com.daml.ledger.resources.{ResourceContext, ResourceOwner} import com.daml.lf.data.Ref import com.daml.tracing.DefaultOpenTelemetry -import com.digitalasset.canton.ledger.api.domain.LedgerId import com.digitalasset.canton.ledger.api.validation.{ CommandsValidator, ValidateUpgradingPackageResolutions, @@ -290,7 +289,6 @@ object CommandServiceImplSpec { private val OkStatus = StatusProto.of(Status.Code.OK.value, "", Seq.empty) - val ledgerId: LedgerId = LedgerId("ledger ID") val commandId = "command ID" val applicationId = "application ID" val submissionId = Ref.SubmissionId.assertFromString("submissionId") diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala index b66678b6b..459b57a86 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/apiserver/validation/ErrorFactoriesSpec.scala @@ -533,28 +533,6 @@ class ErrorFactoriesSpec ) } - "return a ledgerIdMismatch error" in { - val msg = - s"LEDGER_ID_MISMATCH(11,$truncatedCorrelationId): Ledger ID 'received' not found. Actual Ledger ID is 'expected'." 
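Note: with the ledger-id concept removed across the API in this patch (Claims, CompletionStreamRequest, FieldValidator and the LEDGER_ID_MISMATCH error above), authorization is scoped by participant and application id only. The sketch below shows the remaining participant check in isolation; SimpleClaims is a placeholder type, not the real Claims/AuthorizationError classes.

final case class SimpleClaims(
    participantId: Option[String],
    applicationId: Option[String],
) {
  // Mirrors Claims.validForParticipant: a mismatching participant id is rejected;
  // an absent one means the claim is not participant-scoped.
  def validForParticipant(id: String): Either[String, Unit] =
    participantId match {
      case Some(p) if p != id => Left(s"claims are for participant $p, not $id")
      case _ => Right(())
    }
}

object SimpleClaimsExample extends App {
  private val claims =
    SimpleClaims(participantId = Some("participant-1"), applicationId = Some("app"))
  println(claims.validForParticipant("participant-1")) // Right(())
  println(claims.validForParticipant("participant-2")) // Left(...)
}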
- assertError( - RequestValidationErrors.LedgerIdMismatch - .Reject("expected", "received")(contextualizedErrorLogger) - )( - code = Code.NOT_FOUND, - message = msg, - details = Seq[ErrorDetails.ErrorDetail]( - ErrorDetails.ErrorInfoDetail( - "LEDGER_ID_MISMATCH", - Map("category" -> "11", "definite_answer" -> "true", "test" -> getClass.getSimpleName), - ), - expectedCorrelationIdRequestInfo, - ), - logLevel = Level.INFO, - logMessage = msg, - logErrorContextRegEx = expectedLocationRegex, - ) - } - "return a participantPrunedDataAccessed error" in { val msg = s"PARTICIPANT_PRUNED_DATA_ACCESSED(9,$truncatedCorrelationId): my message" assertError( diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala index e0308ad47..f374d5304 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/RecoveringIndexerIntegrationSpec.scala @@ -7,7 +7,6 @@ import com.daml.ledger.resources.ResourceOwner import com.daml.lf.data.Ref.{Party, SubmissionId} import com.daml.lf.data.{Ref, Time} import com.digitalasset.canton.ledger.api.health.HealthStatus -import com.digitalasset.canton.ledger.configuration.LedgerId import com.digitalasset.canton.ledger.offset.Offset import com.digitalasset.canton.ledger.participant.state.v2.{ InternalStateServiceProviderImpl, @@ -239,7 +238,6 @@ class RecoveringIndexerIntegrationSpec restartDelay: config.NonNegativeFiniteDuration = config.NonNegativeFiniteDuration.ofMillis(100), )(implicit traceContext: TraceContext): ResourceOwner[(WritePartyService, DbSupport)] = { - val ledgerId = Ref.LedgerString.assertFromString(s"ledger-$testId") val participantId = Ref.ParticipantId.assertFromString(s"participant-$testId") val jdbcUrl = s"jdbc:h2:mem:${getClass.getSimpleName.toLowerCase()}-$testId;db_close_delay=-1;db_close_on_exit=false" @@ -249,7 +247,7 @@ class RecoveringIndexerIntegrationSpec for { actorSystem <- ResourceOwner.forActorSystem(() => ActorSystem()) materializer <- ResourceOwner.forMaterializer(() => Materializer(actorSystem)) - participantState <- newParticipantState(ledgerId, participantId)(materializer, traceContext) + participantState <- newParticipantState(participantId)(materializer, traceContext) servicesExecutionContext <- ResourceOwner .forExecutorService(() => Executors.newWorkStealingPool()) .map(ExecutionContext.fromExecutorService) @@ -330,14 +328,14 @@ object RecoveringIndexerIntegrationSpec { Ref.SubmissionId.assertFromString(UUID.randomUUID().toString) private trait ParticipantStateFactory { - def apply(ledgerId: LedgerId, participantId: Ref.ParticipantId)(implicit + def apply(participantId: Ref.ParticipantId)(implicit materializer: Materializer, traceContext: TraceContext, ): ResourceOwner[ParticipantState] } private object InMemoryPartyParticipantState extends ParticipantStateFactory { - override def apply(ledgerId: LedgerId, participantId: Ref.ParticipantId)(implicit + override def apply(participantId: Ref.ParticipantId)(implicit materializer: Materializer, traceContext: TraceContext, ): ResourceOwner[ParticipantState] = { @@ -365,11 +363,11 @@ object RecoveringIndexerIntegrationSpec { } private object ParticipantStateThatFailsOften extends ParticipantStateFactory { - 
override def apply(ledgerId: LedgerId, participantId: Ref.ParticipantId)(implicit + override def apply(participantId: Ref.ParticipantId)(implicit materializer: Materializer, traceContext: TraceContext, ): ResourceOwner[ParticipantState] = - InMemoryPartyParticipantState(ledgerId, participantId) + InMemoryPartyParticipantState(participantId) .map { case (readingDelegate, writeDelegate) => var lastFailure: Option[Offset] = None // This spy inserts a failure after each state update to force the indexer to restart. diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala index 1c908bed1..7e48cc7af 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/indexer/ha/EndlessReadService.scala @@ -11,7 +11,6 @@ import com.daml.lf.transaction.test.{TestNodeBuilder, TreeTransactionBuilder} import com.daml.lf.transaction.{CommittedTransaction, TransactionNodeStatistics} import com.daml.lf.value.Value import com.digitalasset.canton.ledger.api.health.HealthStatus -import com.digitalasset.canton.ledger.configuration.LedgerId import com.digitalasset.canton.ledger.offset.Offset import com.digitalasset.canton.ledger.participant.state.v2.{ CompletionInfo, @@ -152,7 +151,6 @@ final case class EndlessReadService( } object EndlessReadService { - val ledgerId: LedgerId = "EndlessReadService" val participantId: Ref.ParticipantId = Ref.ParticipantId.assertFromString("EndlessReadServiceParticipant") val party: Ref.Party = Ref.Party.assertFromString("operator") diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala index 29d31c9c3..978411733 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/dao/JdbcLedgerDaoBackend.scala @@ -11,7 +11,7 @@ import com.daml.lf.language.{LanguageMajorVersion, LanguageVersion} import com.daml.metrics.api.MetricName import com.daml.resources.PureResource import com.digitalasset.canton.BaseTest -import com.digitalasset.canton.ledger.api.domain.{LedgerId, ParticipantId} +import com.digitalasset.canton.ledger.api.domain.ParticipantId import com.digitalasset.canton.logging.LoggingContextWithTrace.withNewLoggingContext import com.digitalasset.canton.logging.SuppressingLogger import com.digitalasset.canton.metrics.CantonLabeledMetricsFactory.NoOpMetricsFactory @@ -25,10 +25,7 @@ import com.digitalasset.canton.platform.config.{ import com.digitalasset.canton.platform.store.DbSupport.{ConnectionPoolConfig, DbConfig} import com.digitalasset.canton.platform.store.backend.StorageBackendFactory import com.digitalasset.canton.platform.store.cache.MutableLedgerEndCache -import com.digitalasset.canton.platform.store.dao.JdbcLedgerDaoBackend.{ - TestLedgerId, - TestParticipantId, -} +import com.digitalasset.canton.platform.store.dao.JdbcLedgerDaoBackend.TestParticipantId import com.digitalasset.canton.platform.store.dao.events.{CompressionStrategy, ContractLoader} import 
com.digitalasset.canton.platform.store.interning.StringInterningView import com.digitalasset.canton.platform.store.{DbSupport, DbType, FlywayMigrations} @@ -41,9 +38,6 @@ import scala.concurrent.{Await, ExecutionContext} object JdbcLedgerDaoBackend { - private val TestLedgerId: LedgerId = - LedgerId("test-ledger") - private val TestParticipantIdRef = Ref.ParticipantId.assertFromString("test-participant") @@ -174,7 +168,7 @@ private[dao] trait JdbcLedgerDaoBackend extends PekkoBeforeAndAfterAll with Base acsIdFetchingParallelism = 2, acsContractFetchingParallelism = 2, ).acquire() - _ <- Resource.fromFuture(dao.initialize(TestLedgerId, TestParticipantId)) + _ <- Resource.fromFuture(dao.initialize(TestParticipantId)) initialLedgerEnd <- Resource.fromFuture(dao.lookupLedgerEnd()) _ = ledgerEndCache.set(initialLedgerEnd.lastOffset -> initialLedgerEnd.lastEventSeqId) } yield dao diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAround.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAround.scala index 45cbd66cf..71ddc03be 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAround.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/oracle/OracleAround.scala @@ -167,7 +167,7 @@ object OracleAround { logger.info(s"Starting Oracle container $dockerImage / $dbName...") oracleContainer.start() logger.info(s"Started Oracle container $dockerImage / $dbName.") - val host: String = if (hostToNetwork) "localhost" else oracleContainer.getContainerIpAddress + val host: String = if (hostToNetwork) "localhost" else oracleContainer.getHost val port: Int = if (hostToNetwork) Config.defaultPort else oracleContainer.getFirstMappedPort logger.info(s"Using Oracle Container instance at $host:$port") OracleServer( diff --git a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAround.scala b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAround.scala index 0b2d5562c..8b3f76884 100644 --- a/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAround.scala +++ b/community/ledger/ledger-api-core/src/test/scala/com/digitalasset/canton/platform/store/testing/postgresql/PostgresAround.scala @@ -45,7 +45,7 @@ trait PostgresAround { logger.info(s"Starting PostgreSQL Container...") container.start() logger.info(s"PostgreSQL Container started.") - val hostName = container.getContainerIpAddress + val hostName = container.getHost val port = container.getFirstMappedPort server.set( PostgresServer( diff --git a/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/Config.scala b/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/Config.scala index 5ecbca8e3..b0bd3bf86 100644 --- a/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/Config.scala +++ b/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/Config.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.ledger.indexerbenchmark import com.daml.lf.data.Ref -import com.daml.metrics.api.reporters.MetricsReporter import 
com.digitalasset.canton.config.RequireTypes.NonNegativeInt import com.digitalasset.canton.platform.config.IndexServiceConfig import com.digitalasset.canton.platform.config.Readers.* @@ -22,7 +21,6 @@ import java.time.Duration final case class Config( updateCount: Option[Long], updateSource: String, - metricsReporter: Option[MetricsReporter], metricsReportingInterval: Duration, indexServiceConfig: IndexServiceConfig, indexerConfig: IndexerConfig, @@ -36,7 +34,6 @@ object Config { private[indexerbenchmark] val DefaultConfig: Config = Config( updateCount = None, updateSource = "", - metricsReporter = None, metricsReportingInterval = Duration.ofSeconds(1), indexServiceConfig = IndexServiceConfig(), indexerConfig = IndexerConfig(), @@ -131,12 +128,6 @@ object Config { .action((value, config) => config.copy(minUpdateRate = Some(value))) .discard - opt[MetricsReporter]("metrics-reporter") - .optional() - .text(s"Start a metrics reporter. ${MetricsReporter.cliHint}") - .action((reporter, config) => config.copy(metricsReporter = Some(reporter))) - .discard - opt[Duration]("metrics-reporting-interval") .optional() .text("Set metric reporting interval.") diff --git a/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala b/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala index 8fc5a65ba..3fcfa051d 100644 --- a/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala +++ b/community/ledger/ledger-api-tools/src/main/scala/com/digitalasset/canton/ledger/indexerbenchmark/IndexerBenchmark.scala @@ -4,12 +4,8 @@ package com.digitalasset.canton.ledger.indexerbenchmark import com.daml.ledger.resources.{Resource, ResourceContext, ResourceOwner} -import com.daml.metrics.JvmMetricSet import com.daml.metrics.api.MetricName -import com.daml.metrics.api.opentelemetry.OpenTelemetryMetricsFactory -import com.daml.metrics.api.testing.{InMemoryMetricsFactory, ProxyMetricsFactory} import com.daml.resources -import com.daml.telemetry.OpenTelemetryOwner import com.digitalasset.canton.DiscardOps import com.digitalasset.canton.concurrent.DirectExecutionContext import com.digitalasset.canton.ledger.api.health.{HealthStatus, Healthy} @@ -20,7 +16,7 @@ import com.digitalasset.canton.ledger.participant.state.v2.{ Update, } import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.metrics.Metrics +import com.digitalasset.canton.metrics.{CantonLabeledMetricsFactory, Metrics} import com.digitalasset.canton.platform.LedgerApiServer import com.digitalasset.canton.platform.indexer.ha.HaConfig import com.digitalasset.canton.platform.indexer.{Indexer, IndexerServiceOwner, JdbcIndexer} @@ -65,9 +61,8 @@ class IndexerBenchmark extends NamedLogging { println("Creating read service and indexer...") val readService = createReadService(updates) - + val metrics = new Metrics(MetricName("noop"), CantonLabeledMetricsFactory.NoOpMetricsFactory) val resource = for { - metrics <- metricsResource(config).acquire() servicesExecutionContext <- ResourceOwner .forExecutorService(() => Executors.newWorkStealingPool()) .map(ExecutionContext.fromExecutorService) @@ -150,29 +145,6 @@ class IndexerBenchmark extends NamedLogging { ) .acquire() - private def metricsResource(config: Config) = { - OpenTelemetryOwner(setAsGlobal = true, config.metricsReporter, Seq.empty).flatMap { - openTelemetry => - val 
openTelemetryFactory = - new OpenTelemetryMetricsFactory(openTelemetry.getMeter("indexer-benchmark")) - val inMemoryMetricFactory = new InMemoryMetricsFactory - JvmMetricSet.registerObservers() - val metrics = new Metrics( - MetricName("test"), - new ProxyMetricsFactory(openTelemetryFactory, inMemoryMetricFactory), - ) - config.metricsReporter - .fold(ResourceOwner.unit) { _ => - noTracingLogger.warn("metrics reporting is not supported yet") - ResourceOwner.unit - /*ResourceOwner - .forCloseable(() => reporter.register(metrics.registry)) - .map(_.start(config.metricsReportingInterval.getSeconds, TimeUnit.SECONDS))*/ - } - .map(_ => metrics) - } - } - private[this] def createReadService( updates: Source[(Offset, Traced[Update]), NotUsed] ): ReadService = { @@ -207,7 +179,3 @@ class IndexerBenchmark extends NamedLogging { sys.exit(0) } } - -object IndexerBenchmark { - private val LedgerId = "IndexerBenchmarkLedger" -} diff --git a/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml index a7802ad68..48ad62203 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/carbonv1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: carbonv1-tests source: . version: 3.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml index 162447d6a..c78116377 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/carbonv2/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: carbonv2-tests data-dependencies: - ../../../../scala-2.13/resource_managed/main/carbonv1-tests-3.0.0.dar diff --git a/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml index 5f8ddd5bc..5286bcd20 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/experimental/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: experimental-tests source: . version: 3.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml index efa7bc4f2..7f0bc38fb 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/model/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: model-tests source: . 
version: 3.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml index a5e1478ab..da0234cca 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/package_management/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: package-management-tests source: . version: 3.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml index 5baa93cb0..42b5f8d4c 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/semantic/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: semantic-tests source: . version: 3.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml index 8f624afad..9f31d3444 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/upgrade/1.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: upgrade-tests source: . version: 1.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml index c32083c43..19010dee7 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/upgrade/2.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: upgrade-tests source: . version: 2.0.0 diff --git a/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml b/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml index ee4935019..ed68f7f60 100644 --- a/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml +++ b/community/ledger/ledger-common-dars/src/main/daml/upgrade/3.0.0/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 name: upgrade-tests source: . 
version: 3.0.0 diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/domain/package.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/domain/package.scala index bf5bb2c61..f4a426d38 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/domain/package.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/domain/package.scala @@ -29,12 +29,6 @@ package object domain { implicit val eventIdOrdering: Ordering[EventId] = Ordering.by[EventId, Ref.LedgerString](_.unwrap) - type LedgerId = String @@ LedgerIdTag - val LedgerId: Tag.TagOf[LedgerIdTag] = Tag.of[LedgerIdTag] - - def optionalLedgerId(raw: String): Option[LedgerId] = - if (raw.isEmpty) None else Some(LedgerId(raw)) - type ParticipantId = Ref.ParticipantId @@ ParticipantIdTag val ParticipantId: Tag.TagOf[ParticipantIdTag] = Tag.of[ParticipantIdTag] @@ -47,7 +41,6 @@ package domain { sealed trait CommandIdTag sealed trait TransactionIdTag sealed trait EventIdTag - sealed trait LedgerIdTag sealed trait ParticipantIdTag sealed trait SubmissionIdTag diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/refinements/ApiTypes.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/refinements/ApiTypes.scala index e0d7cef2d..f4ba4983b 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/refinements/ApiTypes.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/refinements/ApiTypes.scala @@ -36,10 +36,6 @@ object ApiTypes { type ApplicationId = String @@ ApplicationIdTag val ApplicationId = Tag.of[ApplicationIdTag] - sealed trait LedgerIdTag - type LedgerId = String @@ LedgerIdTag - val LedgerId = Tag.of[LedgerIdTag] - sealed trait ContractIdTag type ContractId = String @@ ContractIdTag val ContractId = Tag.of[ContractIdTag] diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParameters.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParameters.scala deleted file mode 100644 index 901611f1e..000000000 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/DecryptionParameters.scala +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.ledger.api.tls - -import org.apache.commons.codec.binary.Hex -import org.apache.commons.io.IOUtils -import org.slf4j.LoggerFactory -import spray.json.{DefaultJsonProtocol, RootJsonFormat} - -import java.io.File -import java.nio.charset.StandardCharsets -import java.nio.file.Files -import java.util.Base64 -import javax.crypto.Cipher -import javax.crypto.spec.{IvParameterSpec, SecretKeySpec} -import scala.util.{Try, Using} - -final class PrivateKeyDecryptionException(cause: Throwable) extends Exception(cause) - -/** @param transformation: "//", for example: "AES/CBC/PKCS5Padding" - * @param keyInHex: Hex encoded bytes of key. - * @param initializationVectorInHex: Hex encoded bytes of IV. - * - * Decrypts a file encrypted by a transformation using AES algorithm. 
- * See also: https://docs.oracle.com/javase/8/docs/technotes/guides/security/crypto/CryptoSpec.html - */ -final case class DecryptionParameters( - transformation: String, - keyInHex: String, - initializationVectorInHex: String, -) { - import DecryptionParameters.* - - def decrypt(encrypted: File): Array[Byte] = { - val bytes = Files.readAllBytes(encrypted.toPath) - decrypt(bytes) - } - - private[tls] def algorithm: String = { - transformation.split("/")(0) - } - - private[tls] def decrypt(encrypted: Array[Byte]): Array[Byte] = { - val key: Array[Byte] = Hex.decodeHex(keyInHex) - val secretKey = new SecretKeySpec(key, algorithm) - val iv: Array[Byte] = Hex.decodeHex(initializationVectorInHex) - val cipher = Cipher.getInstance(transformation) - val ivParameterSpec = new IvParameterSpec(iv) - cipher.init(Cipher.DECRYPT_MODE, secretKey, ivParameterSpec) - val binary = decodeBase64OrGetVerbatim(encrypted) - cipher.doFinal(binary) - } -} - -object DecryptionParametersJsonProtocol extends DefaultJsonProtocol { - implicit val decryptionParams: RootJsonFormat[DecryptionParameters] = jsonFormat( - DecryptionParameters.apply, - "algorithm", - "key", - "iv", - ) -} - -object DecryptionParameters { - - /** Creates an instance of [[DecryptionParameters]] by fetching necessary information from an URL - */ - def fromSecretsServer(url: SecretsUrl): DecryptionParameters = { - val body = fetchPayload(url) - parsePayload(body) - } - - private[tls] def fetchPayload(url: SecretsUrl): String = - Using.resource(url.openStream()) { stream => - IOUtils.toString(stream, StandardCharsets.UTF_8.name()) - } - - private[tls] def parsePayload(payload: String): DecryptionParameters = { - import DecryptionParametersJsonProtocol.* - import spray.json.* - val jsonAst: JsValue = payload.parseJson - jsonAst.convertTo[DecryptionParameters] - } - - private val logger = LoggerFactory.getLogger(getClass) - - // According to MIME's section of java.util.Base64 javadoc "All line separators or other - // characters not found in the base64 alphabet table are ignored in decoding operation." - // For this reason a buffer needs to be screened whether it contains only the allowed - // Base64 characters before attempting to decode it. - private[tls] def decodeBase64OrGetVerbatim(encrypted: Array[Byte]): Array[Byte] = { - val allowedBase64Char = - "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz/+=\n\r".getBytes( - StandardCharsets.UTF_8 - ) - encrypted.find(!allowedBase64Char.contains(_)) match { - case None => - logger.debug(s"Encrypted key contains only MIME Base64 characters. Attempting to decode") - Try(Base64.getMimeDecoder.decode(encrypted)).getOrElse(encrypted) - case _ => - logger.debug(s"Encrypted key contains non MIME Base64 characters. Using it verbatim") - encrypted - } - } - -} diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrl.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrl.scala deleted file mode 100644 index 3a404b24f..000000000 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrl.scala +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.ledger.api.tls - -import java.io.InputStream -import java.net.URL -import java.nio.file.Path - -// This trait is not sealed so we can replace it with a fake in tests. -trait SecretsUrl { - def openStream(): InputStream -} - -object SecretsUrl { - def fromString(string: String): SecretsUrl = new FromUrl(new URL(string)) - - def fromPath(path: Path): SecretsUrl = new FromUrl(path.toUri.toURL) - - def fromUrl(url: URL): SecretsUrl = new FromUrl(url) - - final case class FromUrl(url: URL) extends SecretsUrl { - override def openStream(): InputStream = url.openStream() - } -} diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsConfiguration.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsConfiguration.scala index dc3294c8e..2bd0c8f64 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsConfiguration.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/api/tls/TlsConfiguration.scala @@ -12,7 +12,6 @@ import java.io.{ByteArrayInputStream, File, FileInputStream, InputStream} import java.lang import java.nio.file.Files import scala.jdk.CollectionConverters.* -import scala.util.control.NonFatal // Interacting with java libraries makes null a necessity @SuppressWarnings(Array("org.wartremover.warts.Null", "org.wartremover.warts.AsInstanceOf")) @@ -21,7 +20,6 @@ final case class TlsConfiguration( certChainFile: Option[File] = None, // mutual auth is disabled if null privateKeyFile: Option[File] = None, trustCollectionFile: Option[File] = None, // System default if null - secretsUrl: Option[SecretsUrl] = None, clientAuth: ClientAuth = ClientAuth.REQUIRE, // Client auth setting used by the server. This is not used in the client configuration. enableCertRevocationChecking: Boolean = false, @@ -177,25 +175,10 @@ final case class TlsConfiguration( } private[tls] def prepareKeyInputStream(keyFile: File): InputStream = { - val bytes = if (keyFile.getName.endsWith(".enc")) { - try { - val params = DecryptionParameters.fromSecretsServer(secretsUrlOrFail) - params.decrypt(encrypted = keyFile) - } catch { - case NonFatal(e) => throw new PrivateKeyDecryptionException(e) - } - } else { - Files.readAllBytes(keyFile.toPath) - } + val bytes = Files.readAllBytes(keyFile.toPath) new ByteArrayInputStream(bytes) } - private def secretsUrlOrFail: SecretsUrl = secretsUrl.getOrElse( - throw new IllegalStateException( - s"Unable to convert ${this.toString} to SSL Context: cannot decrypt keyFile without secretsUrl." - ) - ) - private def keyCertChainInputStreamOrFail: InputStream = { val msg = s"Unable to convert ${this.toString} to SSL Context: cannot create SSL context without keyCertChainFile." diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/package.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/package.scala index 7393d52d1..4e07210cb 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/package.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/configuration/package.scala @@ -3,9 +3,4 @@ package com.digitalasset.canton.ledger -package object configuration { - - /** Identifier for the ledger, MUST match regexp [a-zA-Z0-9-]. 
*/ - type LedgerId = String - -} +package object configuration {} diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala index 03986a427..1700b27b5 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/ledger/error/groups/RequestValidationErrors.scala @@ -227,25 +227,6 @@ object RequestValidationErrors extends RequestValidationErrorGroup { ) extends DamlErrorWithDefiniteAnswer(cause = message) } - @Explanation( - """Every ledger API command contains a ledger-id which is verified against the running ledger. - This error indicates that the provided ledger-id does not match the expected one.""" - ) - @Resolution("Ensure that your application is correctly configured to use the correct ledger.") - object LedgerIdMismatch - extends ErrorCode( - id = "LEDGER_ID_MISMATCH", - ErrorCategory.InvalidGivenCurrentSystemStateResourceMissing, - ) { - final case class Reject(expectedLedgerId: String, receivedLegerId: String)(implicit - loggingContext: ContextualizedErrorLogger - ) extends DamlErrorWithDefiniteAnswer( - cause = - s"Ledger ID '${receivedLegerId}' not found. Actual Ledger ID is '${expectedLedgerId}'.", - definiteAnswer = true, - ) - } - @Explanation( """This error is emitted when a mandatory field is not set in a submitted ledger API command.""" ) diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala index 02271ac5f..597c5ee05 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/IndexDBMetrics.scala @@ -167,7 +167,6 @@ class MainIndexDBMetrics( val execAll: Timer = overall.executionTimer val getCompletions: DatabaseMetrics = createDbMetrics("get_completions") - val getLedgerId: DatabaseMetrics = createDbMetrics("get_ledger_id") val getParticipantId: DatabaseMetrics = createDbMetrics("get_participant_id") val getLedgerEnd: DatabaseMetrics = createDbMetrics("get_ledger_end") val initializeLedgerParameters: DatabaseMetrics = createDbMetrics( diff --git a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala index f6d8b6097..565e638dd 100644 --- a/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala +++ b/community/ledger/ledger-common/src/main/scala/com/digitalasset/canton/metrics/ServicesMetrics.scala @@ -3,9 +3,8 @@ package com.digitalasset.canton.metrics -import com.daml.metrics.api.MetricDoc.MetricQualification.{Debug, Saturation, Traffic} +import com.daml.metrics.api.MetricDoc.MetricQualification.{Debug, Saturation} import com.daml.metrics.api.MetricHandle.* -import com.daml.metrics.api.dropwizard.DropwizardTimer import com.daml.metrics.api.{MetricDoc, MetricName} class ServicesMetrics( @@ -224,18 +223,6 @@ class ServicesMetrics( object write { val prefix: MetricName = ServicesMetrics.this.prefix :+ "write" - @MetricDoc.Tag( - summary = "The number of 
submitted transactions by the write service.", - description = """The write service is an internal interface for changing the state through - |the synchronization services. The methods in this interface are all methods - |that are supported uniformly across all ledger implementations. This metric - |exposes the total number of the sumbitted transactions.""", - qualification = Traffic, - ) - @SuppressWarnings(Array("org.wartremover.warts.Null")) - val submitOperationForDocs: Timer = - DropwizardTimer(prefix :+ "submit_transaction" :+ "count", null) - @MetricDoc.FanInstanceTag val submitTransaction: Timer = openTelemetryMetricsFactory.timer(prefix :+ "submit_transaction") @MetricDoc.FanInstanceTag diff --git a/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.1.dar b/community/ledger/ledger-common/src/test/resources/test-models/daml-lf/encoder/test-2.1.dar index e2dd53a3f85ca9cc7cb5b131515fffeadb73b59d..a87fe3d6cfb52bfd542a879fb3006fc1634bcc29 100755 GIT binary patch delta 4562 zcmV;@5iRbAC%7dIP)h>@6aWYa2mr8AbCC^^1F%tZk)a@eR9n}XX6t%50&&;|TRyfy zmTy=#7{tW}C$Wr;4K^5zvE%rDkPaXlbg(3BAWqUaP1De(7pG~PG;P|XX)aCL#%Yt5 z#A#>EntAD3edyz?nTO8fta+F<5A!fj^Z)<3OTq)0HLS&_{q6nV_TK;g&lVi{?=Q=& zE%xBDsPKS)z$6x7@ffzaC+U?VORQcDK(iv^;h}$Jg6K)Yh~yy|4-EKh8HO_dS0=5-cBe7aw+knZ$II!)&b`o1>#3B-_ z0~QA+5vvE*49r7pJ+K6@W?~zFbpX?66fvw+5!(o?3)og-GO$O0?IE@aSQ6M?Vw-^- z1-74m*cM=Yz&eOE0P6?VNvsjr31A0^MS-0H)^iU}Vh4fE0IMMO2(T=$UBn&*rUBaxXX>%C zXDark&eUV2XXvXX z%0u*-kDM|-hhhAPHC~9EHu}$4PJB{dPX$;%>ac#+uy!s!3PB!&_JZ8M#QF@S^QJT2 zzRX(X7Hgu1BtMV#G&rV?Js>O%K^eo(^O0cu9~l@u4i=u2KVwVmQ)ZgwAFyTiF5_=A zv6cF6MK182i`yNGdq_HlR=9K_?C_C)l#W{(7gwp#Z)sdwrN#iSUS^_*rT8ZIG*zr2 z0}hpB04N_>wepeT^m)r8^jql@mIh#l1)7Z%xf*YPkrUXK>lV2PrI_2+`=yidMi@T{ z<27>qe=-nA-f;;G#=Fj#ZA&al6TTCfC|WLv5|(QsdR6=fzc6gA(gr$YwPKKalg}~xl*Pw*P#A;GuQPdI zi5;_Y+!QQM^5K#sA2yQQ<4E#jh>~|HAg!fqkS-XP9N=AJm3oC2}ckk(gg<_l`gthi_AQKz8W)^TxKr2 zSCPz|UyYe7E;Co%tGk9$Fb$g@p@wezRTp$UA!(v@gm9A;P_gpU8-K`Sw`{`<>lWjC*K3|lOXd$ChuEfdmZx8d!8*T zhDN&GpgRTfuQ9o0iR~fXS6SqF#v7N}=IgRkTlyLUrRP};%A`<{c);xSzt48ysunG~ zVlKOEE_=_aB}X511-?(*B)q^@(&AE2*wH^CWk#b8sTAj>61~@drExhZy#S{=8H799 zIhdYfBCmPYx#o^?Fk3Ucxb?6LQntRxdg0?lc$tO7N~-8y=|wiO#1h`*k63S|l!^+} z6z7HOj>p>#>+y#7u1ms=hp*^$>BcHov{X|3j0w|aQ+}i4uwg>XKoa~I65<{t1oVp) zkucx7#2QFQgeO6N-F;RICPjQOnD#|@Y6gWYsQ!e>b@xEPR5b0XFI)zhH33)Gu!z zF!|&X>!*Q#VB{TU+2$R~tWDnMC~AJbwWrZl=Iq-WXi4udSegg+_3b`f-+pCZ-`;}j z+Z&9(ZeHKsE4`16l-!u$vSN!D-^a>{U$VuE?_=e}uN2?M4#K)zpe2x7ds@VL$81k? 
zs=QDLcqLxu1rMyKk>c+%TvSxPv`S^Kt^A&)d}Wn?%6?n#*g9lO+gZn*w&xrnpMPSqa z*(2O`r0u+swwMVYGxG+N_{1a38)5Ih&vw$$+0a|>i1xNLPtktN?DMoB{g$mkKE4Li z|J&n#_9^LiIHCVP=QkstuwHL+nVqOvW+;wraKD7^?Hzla2@7Sbl;o&Vm{5zbF`q+1 zEwN_nAa3i~>WE|kPH`pe+#i_zHEazDwNS1~KUp@qsV`NfpDgF#PFV*xzm$VJr4Byw z38Q7p?pG2*ocPt^a@!2+a*>umj=`?bYVFe^$vA7Pzl4mC18q1PP64S(S^ubD8NvCW{?{l1_ugt`06j>(tzgn#-g( zQ}{aY)i<9~XA2oEJEZ3G$|Ptiv$JY8J*LhVcqpah3zot-mr`0<9nm0~{Fp`((4*#3 z>Jcql$SJA9KpK0@wjm>?m4a%t!+QIFY&J80R9S$q)0taFY|ye?O>)0_b55D&mGm)~ z$rqp}m(y}wQ0HfLjS#e^As{mBpU|e$)A~n_S52zf8D;jUI;|E|9@1Ig%q%RIF*K0P zt2u~GXD3yN#TXgXv>S7?=E#}MqH2zuQ>N$CJP%IhRG3AMR~n7Jn>>`6v118;XE44o z&74li;56u)hK1$pvG-GdsnYn3WC7@-hhO>HOOw-4zEv|16rGP=Ia@t(h+Fvn0 zr6fAL^pX5jX2RM?AwM^6c!HMMD-}R;Ogm!jFc0ZHhGU&rQ_?9V9j_^5X4HH^nVB^r z+2rWJtscsxa#~)6%t+_CAG1S$<>5Ia2gbCcu!wxqT#+EyGOh3+iqP@sg9X5b|A;mX z+29>jp?y53s#)&qh3(-X8c@??(8qm)sxrZSr(jatpCS!d{|QK!TxN2r!1*a~pr+C2 zmzU(A6^x?`g+Kj3<>r<2+MmXkw3E^y(*O()E zSDt%PZSb3b-*#SQpGcTyrNww~#yFvZVxHBol0ED=!aSs8ZQ|3k**xE%WY1=A=kQRc zU9#1MshoEEa5jBJDWs-2Q+efaH9MrtlG{<(gIpS$z7Zvp>g>eTpJInWTH*y36m#u-fr@CE>QNN*D7eKxd})e ziYlwj!1h8JgaSCKX$4+6uFa2W)Zv9DuL4*Y9tW?tAXXnTUX6GrGnrKiT5f?<7DxxH z3e>SoUgh3YhUQhuJ!c81h{$^#&lQm1(k%M~ru zI27SDOj4!FBN~SXhdlGG+&kY!upgj(zaibqJ*_Rgs?K)4T=bjG`aWSM|La+MKR4qKoQ7AR*KutV5_ns7F|jumK^0un}Ps!e)dm2n`60 z2vLNs2r-0h2-^{MAnZigh0uhs8zGLc2VpNlGeQf(J_MRp8$tr19iaoE6QK)XKf(co zZiF6$g9wj*AUulj7{VchB*I~YUW6kEM-loEjv<_o;qrK55St?iqX=UNk0U&Ra2eqW z!Zid1VH_cipdw5nOd(_tt|Qz)m`2DVXb7_iHxY6Od4vMO9KtPx+X(Xr3kZt{cMzUJ zcpBsV3PZe0_#EL^3BOMG4Z?2{zC`#f!fz9Phwv4D!q*7jApAMuF9|;&{59c6g!c(Q zCHy_%GT~<|77+f(q(8B^Pxc7agr2w{>o41|INagsrI-r*Bca9cU#Ar$TSKU82VCRK z#e_I`i-gItAHUvNqxj-Z$i=GS;aC-Z5tH!`)SjdsRhTenN#ct;lEtJF#>peMJl45< z)LRUHU)EV4UmvS8JsO|*y3YpE7G1O1NS?4?pm~@r+djUkv)QyL8Wu&};udRdC6|1o zIcHIybChQ5wPa#0jTl9Q^OLRl?QqR#r)##mEQT-ZG{qZYn~P^_eBw(-yDe#)dBDZ? z6vesMS|NK!TU=Vm_E`*H>}f5s(dODWv1qh^-SXhx-%eLt_UhxSSo>Yx-DBMr!>r1*u_cKixh~uOL5aNp-q%fR)sZvqg2|oJ&@6$+9BZCtRaa(db2g zYcyCALvgjb;>yyEE@oHOkgI>(PrDU0M@VBYalhlqRcR6NX`?*%2}5L;1SZxKIS#E%wnzg0QssD&?E-m<*T!tZU@s?S?eH(mXUF1!7b zox^u54Hoyb)>)PEnfL}?{@(+x0q5U;xZ+{C3eK|qvWU~?FwCV=4hr}m&95?Q*3v62 zq$SZy8~|Tq@oG6_#O+PKpcinD(dkXT$f$0bIxnKm5{uWws*PRpK*u+Vj6P@cyU1U% zIfeQy_hUF#QhaTVPi!$haSd-@r|dS@dT}PgwLV_f$2Qs&g~mcJIwA ztSB;jgWt6`_`RaFe&226mmq2UDf>gSS%*zi=|fcdm8}Hdf|I|79E{bMltFLuH?Wa$ zk^XD`5&nya&VFTlU61|FNkN)_gVB$fbpS+bxqru~iaR}hvXZAy-IKp9&0CZIgL`P9 zXca$m505&Q?SZ{)UlgV6AKlCLCl*VNceb{Tcczs6tp_^NZQb2n2intJ@6aWYa2mmn|Xps$(12GwBk)a@eSX1~jCQWl`b8DIw zH%)IzdQ01B-{zqYows?Ihk2WMnzfeBZA-!fnQ!<$e(itnb=!Ndz0MXK_0yEf60uKG zvc>}vQ&@z5#bemwo*u6nnPbgz0Gc%s4-fs5NwTL0MidXpcwoTi$S{=oSC>o!$arDE zuloO;L8p99@>xz|%N2i=#TC*FL_G8vj0BT=B)=l5o>ot+b#|j%r^<3b_Q*9#Kne2Y zy-jkRZ1wmQzv@w>Xs|o@8uNkIp!{(_lI&^JL8}&jtk$ZYXpoo;EDWrkSO{1nusy`; zfGqU|Wc-1l9v=E3s9;_5WahMJx)eAJ|S}Yk{2v)=ex1>@=|5#MYT}2oPIuGC#2m zzy^V>BeoIP?ZBFdZ31=%*m7d6rk;=3W|Mh|#m%{gh;1=hEwQcUdNvT-2J9@bW@2r? 
z?gG|AY&)>KfvqIg4(uGTMq&wI99WcC(#(Q?WyCs6e-g1yGnc}|b^tpMYy+{Kz%*bt z5$gh`16xgO7qC%atBCCeb|0_=v2I}Fz}krI0cHT(PV8o21z_#OZUHt0tb^FCz-E9Y ziQNY5eqbwz^#FSSSSzu;z#alt6R+n%sh4|u9%eu{5#0y$5u}^pr0%bJQe&U#r0%bO zJgJ}hq(0k23wdaigAl+K=D3K3lmk)L?dRSTCr`y9%E2g0a+12rBB!bGITjg=tyKs6%jP}tL0F+G`<=}PGVbsZJOmW zRDAAO?^jO6qcDC7#v9b-|H+oYbiTp_iGlbA*rfrE1FG5`afQ;aoZu4oB|Xvmz|mR| zI}KuAW$N<(u4d^}{0XwSS%+J$slQmmB6;Opa1hz2a%126+?0h~#4G-&l)` z+FY4u^>T$FHzRu58&LR#>)NbX5MtFAY^aeNVeiA7%Vm z=zO+H#-m_)#t&6w{E(UP9#_Wyhb!a%2^s$nC*%JP8UGi?|HR~N1AZ0Ib3ng;%hbjzEDY(m z1K5+e-%l|by_4408trtA_QB{~rP1!$T|y-1Tu}@QF`ZE~akofJyt@30)#Nd@36Ocs6 zf$nb5&4K(IOl_NETS)hH7I~cUHS=uMy{cPVfs*D`9*2cVdZe0v& zvDG9b!{eao9o7gYWqdkV_T~5*P%VP$TTE@b#u{x^OJE6SHeY#WU-n8d+j28?YZo9!3SzS;gRda$36=;6{5JbcgP;U$-cmt7vdU*TbT2_Amn^6;w5!)q=N zue&`gVV|Zd_vxH8Q#HvvpXuAjOr?XznG~q%A?Qlut*}V=;Qtv@?aU22nHzLx?o#3j zRL1jfK54jr>V51<_qU;dV!%L6tW~*PH4ldQ$)TUqQ%8<|qEBX2U>Ht(Eg33a=xsQLNY?lnT0bFOfprM$^tX+Ct_;ogNi+?&oF?j^Xxy}Gpk% z@mjpuOCDHLGs)j#xU8t0TBNeqQGVN2&MZ>d?{j9!XIQ?)UV4Bl=So`3aN%;gKG+ z6E~uNT(-ey<04=R@_-0RFaQZvfW*%IXAOECSv zJ#jB9AK;Asf1FFDzb*WN^?G~$$c{A3GnB_R_`in!ojp4*Jyf+Y{E2<0TKG_9R=mKZ zhgsEPsJM#ZEnJcw!R`MuQxkKn%|7qzyJIqceh&?j{f8Npi&{#MUo+=RU-D)y+RgvGnKQpH6|y2`39^i}qmd|a*K<6WP0rmByp+*Q6+xudeW zC#~xKkyYJ4cB=c|t5x@>s#Nzg71jOB|FOFNwX(XOsi?p31=JBQukO{v(~fv~b+0CW zu2)p|Z7>0i7D0`{HNM$3-#hFYt9-;%`WiytmeM8hbpvY`5{H6_h4_=ww<~?I@^3Gt zYM-gd8Xnl2n>lV|cu>pDBoF6`oK5p!HdjnSKMz1-K%eBnUc(qiDQ~Ya&Ha;FzBt2u z#j$LGODB=gn32=D+&h&WFJ^PtA!(U^3|QRR$z^EuPfh6gY??ETF9TnF(`kLOm^E_8 z^+G`#1x;;oQqN@u_30uHrL{uQR!DIrZDjP521HXBG)Mw^^n6<1XXJ``EnPgE!5*t^ z%E%e5sGIF@sXdg-PVd)dAnZ)`q8S^sY*(Y)uV0wb#(6D$9LpAp(38&_c`oUH(~~8Q z5VXc2ATsP9F~&3FrH?$X8`X0Y+T?zHTrcW8RAPM-ldxRo(BWJ`&qHh_c2tL0%#mY; zao^OWHF7#Tt6L+(+W3@S;K9+n4ztMfTC>r2frqjaPAuUp#y4nK(+QcJ27TkO&^&+$ zdeVep?u|4tHEym1v@F$duGAQRG)iOjHZN@urGQE5^2Suo-e0*erX_dmER7V#vLp6J zN`9D zL4!8jd!IhT{TFf3c|$Qfp%;qU#H2adm97rVrsLUk-YDphe3=6GV*)~dy_ho7Y|z*b z`@~mHl?j3^%L)&o2%W$_SYvGX_Zj1maNYqO+6VKxp5wk=3?@VadS(#%xbK**jc{K- ztQ+^INdvZc1Tr$89UUuj-VYA+3>qEOAxSWhdV`LfHeT{je?p+4w99<3-+=R-8^uOx zxc0O$p_iJ?X-Ysd9P1E&ayH2utdUYzfqT*k_)WrZ2d{GuJxsIKW;{4y76D1K%0XDm z?R8ZN9@26S@fpTsfwyS6q1>fB){TrHTVEW@8<+OxGW)b*dWR^4gR!J!nve7n-~dVCF;$ zyk3M@eaLt{;_2*YPAeMu8B&=c9V}c>DzgQ6q@=Sn|Ke0WhoxQ`)r+N*6gWNu$qYM~ z<-USGKEmtWA+_^=0EngaaUR42LJO6GI-h|_>U7v-^L*lvXS$txrxOHS03BVXbUXL7 zxAD66ws!l6FLN`ib5oVhYCl{eyT`R@zG?TOe%fv<*yc1B5NxaH0l~`7YCl%A*&J`( zwkQn<_r{X$co&eY5->l_&Ll z0SZ81vh?Zg2P^*5<2u}7*c6wsf}3R^NNudPv@5)}0M#v8q^Fi$oJkN52A#`eR9y$C zy2AVu_74w#>p8rkVW27w`X5qt=KgaAS zR)lQ`Z3x>DXj%z`Bti#5C&CVdod{hByAXCGbR+D4LAV*=7KB?7ZbRrn*o)AMun%EB z!a;;X2!|1lARI+FhHxCAA7Ma+o9w_KHqRiOMYs#$9D;_RBa9%7A!HHmMHokzKrj#{ z5iTI)5ef)Jgo_B55T+4k5M~kXM|c3?L4?Z)4`D@wA+a8+)AI&zySEZK6 zm&ckckLD-7Y}G>AvM`$!wZIZCtj z+A=XgBSsP7{8W2>8-y8c6lS~0X85X9YkYNVRrzeqPkiNTvn`D?4+v~aS)5z#6>@fe zv`x@Lw%umf?)WU4nOUY`4wu)v4|U_Gyox(%2YKM>mtfFpz43*W5VL?_AWLcT)9m1$mHhQN$8mx$+yjtBO zWa$clIh8de^rr;x=`xEMTPrANjZlA`aQ4RVSCGn5x`|rbj=D%|f*g0}h(7jZqJ55#6XUohAyEDt(bz7`a`1@dgnYGKp zzQzPL8xM2$3u6c3usdaahsyZDGJd3t`{T80P{P07Kgy`tNbk$V!)hJ)#kT+f9%J!( zHDu;BQzlgv4!g%{>g_KPo>(ZLqI3_P)N2+Z?m$yGW1N^gYpk*A&dvy&La~ zoAXL}WX=xHJ3D;6Y_WeX8u@ksFY{2F#L zF4MadzrlYSQWdC;FY9*RM4DCZhtZFmEuW;vnA`j(3z`3&IO7ME%l3@_!0eg}>Bz-@ zGYK>PR5WtNwQPTJmhG>9WjXsV;TI)z^>4ct0 zw(r>0naE_i+7r7w5<9zgk7(_gPCcV1Qd(l?j$NtF5iRvUP)i30WZ3LoClUYvk}&`P zP)h*<6aW+e2nYxOF&SuBS^&=@QUCw|YybcN6aWAK000000001!O%f*#F&SuBWZ3Lo UClUYvk};El5-{qLEdMMGP)h>@6aWYa2mrKEbCC^^1GG_dk)a`f6jip*?M~8_>MWI;gcKEo zPP4@ZLefcq1jP_Q_QeoTP{B@jC22^yLw5%fP*L3DjthdLqmDX?Ix5Z@ad32WT*iHO 
z79E|pxXjFZ^WMDozW4rl=bWmptvi@F-{kxF_5Iy@wtMfnr>eRO<+Y^Zj;Oo+FC@hf z&li})WESv$V~e|5-HJTSD#Uzf=EyGY{fr5ss})8h7sv-aski+B`@fHX;BW1 zi~9Imnz$Ee5zr4bTBsX)SEEHhOM!l*(PE(GKtI%fXbI2>KtC=Kw+m7_Wd8)I)W-r6 z&9+i@(Wfkz2Nw#ZQibBGartZZ&JYU~QOp-zVvfY6GCrZLTr3pRJ(5(axTFeSd5h;L z%Y~Ja#sBjN<)bZ1AFW%m-{fuo=$n!@+7jaFLiF*y+F| zVpD;&0&@|Y25cFyImD&|TMmp7o1t4NBsLS+N?_H*YJjZ=HjCJ)z%~M#O)LQHY+&<= z%>s4~uts9Dft?GiiP#)q=L1_ntQJ@Z*gRr?bAg3{wGgWV)&;DYSiL@nd}2YJND z)(vbbu|{A!fRz(#0=5&_1Y+}ay=3vK%cwUc^q)CSsyGQ)}k-3w%H4T^=a9S zgQo$F!(aum(}C>*R!M9TumrFw=NZGnGk~UGu+(|LaIh7q9|nEI7V8OB_6>&*W!vk^pq|pwiMU^u<68>0ox7i6k=xr+Y4+mvE{%n1U4y9%ss*i?ryz=fsd~}#8(2p z6!{k#Uj_VfuhHSnvEf2Hv?z=x6lS>tPg-+=s29+-khSO@$j2JR*jT@UnTq%}l0 z0KFAy4k@1n^fpv}zLJL9f!~h9Gl_101bPVRsZd@zvX+;oZ&hA8GRwz|W$A1m3v!Y=%;X3)jxbsES4-!BepOn(iyC(_xjWtH z@%yALAfiymcbL2*J<@CV%9YM-$pI%@txjTToAE#gtk8Kt?q$l#VRojjx65#U${R*l zolEy*D%qtd%xK!L#lQWK# z*p!hHpuA?Uv9ujFTcDM!kOu>kVdMgAE9HB+2-hWUTHhvx0zMcI z!MIPE@K;s_)48A7Bsv4rVV62N4k${CYzbwXw4Dn)H|X+R2#%^jEDT}~FlEC3X0sFu zYypu7h&;%Ybt7!`)|GT$GJS;kMISYl1FlMHkT+Wx@_xVaHmej-n@b~qtVr~dB;0KX zM_5aZuw9up%&KW0?`QIU$Ff1Jux$H%gW^khf|~X8Jz_b1l<_;D^NB1OkAlvOS2HqR z)id5@$@srmGX8VO_)pA?e-|?TO~zkiVy!5E#A8gpl5ziDpCubqX$K{vwP&X|hk|+x zPslfm_nP9VF^Kn>;saxU5RW-e1=ZH6VBm+0ZY?GOjRXA&Q%Vo8V#voPU=QPlA7wPU zlNLBV+GrVF2BW>&Xv^LOwn*ZZD7vJ63+r(%7n$iBi;|%E@MGsY;TNzwc z2HSD~huU%iwGhT$2;*s{OdV!EO7RgULza|3=PEUP?UM$%TRO}FEg*j($o!lsbBEb% zl0VMmI~n)A;yT5U-y;oivpuM|r8`+*1?XM`^3O5l^kLRQx+j=?7o*m*Oun1(X(Oz9 z#}td5LFv12cOcz=1z`tWLQcRf-K|A&G3fl#62&u2z89i+`=n9a%K|GwcR$F##FRzD zY$55sU__zKefo&YZ=1W>F4cme#aP7!;SxLruUc%qY}nEwn*OoHR%S$G>s}T-&)oN8 zY&!0yW4rgHw_8bXcgvV1+jfI5j|8;ZMigHZVY2?ybHO1aS(C?2E-h>idPS_$s|<;_b%nf zSSc8-!dscNhbp*j6ohM0_dTYRA7rJ5ZdxLEnvp!G?w#qlz*M$X&{kD9S@bej0rk_jKsvhH?3L#^U4W79S@pKAz3+apSmr zJZJIoyv4^aEk0hb`WVN4U6*CQLc&ei6V7FMY970bo3gmLo(Vjw3)zy>v*D^+1`G9j zrWjc)n^`PdvsfO-O%TtF=a0EUSJnrKhpmr)@gAr{Fp}f1k-8QAkm#S)(9h|TR3-n4 zQDdh35({kBpFKWj%IaaZf(B&yWo8)XjU%jHnQH{s1>0WTGQ|>Zm*wFDv?RD$C=Z;l zJbgR`Palt)PapTg)5krG!wrRYzyE3G(?nOs{XX1jo8r!=iEQEtQ{4G9kxe|6@iei2 z5GK%1i=foC)QT0B`F3;dx$z;!3-Fm#aKW1DiT(=1Wku!07?s_o@~eh&YK+Qxrt+^* zc^qlhm6>K8!p+9XQYK0VnNkAA1r(<&dIN6kTEAeB`GRl@#_<(Xe4UfUNxX7zg;;oi z!L`bV{ePy1W#8LOev^q`yM$ZK13OWFx{VphD9i6L<3=g?yi2$(eYu)-jMdN~n%Gup zxdh$HK%K&di-B>rE|T77%ltQ8lMgWI1Dx7FE&@UOn(RZ?=5GBrwyI==;XYy; zf_M;vFgLMNxSeIK75~n@ReiiYOMSeD35T-R;0jBDJRQ>H4orvtU`qWkt2Ii0z_b>> z#la!GloU|H{Fa&DV2mTgLs>%X6b@%!h8w@#G91q8<4ATNhrgYVBUyYrl`HgQ^>Nf% z`fH?UX2lPtEB+_xivOuu@&6}V#Xp**;`d}!{GPwF;{PqP;`e0Ko!Ep-9B+;YE??ZY%8@=(W(Y{)8HiCZq9~rL!;*ABaVwv2HM@@{(kKIMl0JhRc&Z z(XLc`w7Vx|HJlviv>HyuO_uItG3?iwMOaG3JJdm|HBVbS-ixzwx5Wo}UVkW&8sfRB zo@kN_8<5agJf?E7Z6Mm4ipH=*fT_bO&1>W$v<$ByUIy#8Y%r|$r(mXQ)nqc%4Vt0; zel-?B&%CTN+8xv6JK{!vSk_Jw^M*solwr4%OJP`{4RP3rq%HwHY9g#IiN{ijP&lO?3|HMePu(>T>g5IWu_l^KK~EwPPjEpU z?AJ8B(CUSN$Z%d)yf@OTeI$5cw;JmU^)FR>)s)J;8e7)a538zw53P(P)da+*vDGTX zqK~YJ$9E3&r$;tM_p0fU&7s}_HOW2Q2^D6M;01bP*)Hyl_L;F1M=?HgVZA!1!Q5Wh z51x+*dO}}Z-$DU0RbL5c8F^)Fh$UN!jZRIW$gyjbK048oJFO`udqTmcdD;l&b$UdA zeAeAV%gj`e?1(RaF}9t1wI1EEMvQrCloD1OGxwlSr`jf@2lZV@Q+?^ZI;FKwk9EW; z3*0-^A)dDzcY>FsqJ3&I73%BPCtGFdz!X~>4JYDB6*4lC2lUEHmRKa@MF881a)u?5#>e$Fz67Pk7ymYs#&|aQU)fmrh!|=Q` zphh~NkLRvYLtQ*~9hR89Flj(0bV1@Ku(EKz4jiZvG}@s;E@2==aXP@EUd=<%dK>kI zE#^Jz;!rJO-Pq8E3pU34RIOPNraaZdNuO2D`guuuMC(d&SGXSj1>s)*Vw+-n5?F;IHrb+7Pa?i&aV)YVlnVE7 zrt*U2YHV$&pWL>?9wZ{z%-s-*Cb+mcl!%66DeTE#7ESfQK`gdU1ER0vxsWp&))NK^ z8}KqhIR`~HnyBXqnH3Ss0`#6+q`5TmJD^NZDbTLC6dsy3uBU~Lqsd!?D$c4))UMJXqc}{*Kt=}EibIAtuy|3nU$qGXJ+Zl_QwUXyEZh)XEu-N zulD4EwPte^!P<--5X|h%_Lq(}8sjx{#-ss%;htR58t*8QSpo*hF-Bal-Y6LEE-cr{ 
zSOgQhlNZ)ocd=+rur_G?frEyV{8<&njYF|`Y=|na=gfgv7&4MGsH|OZx_PNqG4=kX zQK$tav3SadS7)_Pg$#{wZ~9zPR&G!eNvJ{j{o1E{9azcR*sH>MWdmHq%Zgr!Jk-X2 zf=|1`3zAS^eJQ$>8rKIB#KS~Kd<_+Nx^=|)*BgJlSdHPmRAPH)G!}v*l?SU+Tc@~1 zmV>|$+z7b{c?kIk9s~iQ06|3XA`~JNArvD>2&D)dp$uUHLM4KXFcCpPn1pZ&!eoRg z2tEWq!c>H52-6W}Ak0LlK?oqsLYR$zFbAO)VJ-sAs2(AR(16g0(1b7#VLrkFgl2>m zgoOyFA)Jn|2;mHbR)oa}Z3s&cmLe=eI1^zx!U}|y2&)iQBdkGKi?9x1J;DZrvk=-5 zHX?K&oQ-e}!WM+B2_ymz za1p}AXyFoucq!ol!pjIRC%l62D#EJ?uOS>Jyq54f!s`idAiRKu4-(!&cpKsE zgog<4AUs0&9m0DE?<4#k;r9t2B7B(eDB&Z7j}ksc_yfYn37;T*lJFSePY90_{(|sD z!q*62CwzfI5uqy}DB4@MECA1J~-KVx5su6*l=` zI(kuyzJXT8ZOAm)G@2+PoZqR&{1(`MX4Gt(ZHvM14cdi)$^J>s+3KJ8Lh)&aG)7Ws zV~ZSdo?)zzx$lc@TFBZAhHuv{aoAXD+v6(7=$VEGdoHZ7#bvHOzAnAW=G{KF#$c=A z-`Y|3X`M}Hrp?y|gPnB}3+*K3&4{l9|Jm$q8pXoqQTjVux=Nzyc-yB172SdKoiVHRKH%kiskB3_-hDll#MdmM5}D^9&}o~#gxX{8?^0t zlcCGVc%YbDUoYMqfUR@%o#WtVJNQ-yFQvKT4`g>SYL?R5)q!HAkY+@GKR|ugToxJV zJ*MPg`^bIv5VuL&NwR?6^ramwr3G@;mD4~T41AvjDtY!_X>9}6@A)Y_ujP$Xt5?-B zsYlIFA2TQQ2x|Sn9{S_jWzc2p<&(Grm4?ofsB_G^eJ<#D%3<^olOIL?Ba>68KepfU zRN6N9r}la5lLn1oMo>C`&o;aVhV6XF@O7Ng%ZA1>9A}25V2dGdbBw%wAJf=x&<~NWGU&&D=^2+rdx}bi3XVZeF`nu+>1q+&+nwq;Bn>s^{P5%#2O9u%1AtZz! z6#xJdIsgDrO928D02BZS2nYbQQFB;Y0M8>*0001N0001!0Twb1v{7?d`ynKR9~A%q P5|cm{F$V1w00000*cx@3 delta 5511 zcmV;26?p3ZEcYx8P)h>@6aWYa2mmk{Xps$(127qAk)a`f8`ZVHYB($dF!?|Xl}bIy!rlN@aN)!&z&?(g2S-FweHGb2rumy(J*qVDlOmlVT1 zPhb+0S-_8fE$(V{EAj}d5c8m!BfEIPhfEM%tuP|FNX7*N9#e*)%zGA@5|DAjK(3Pe zF@sL=X(o6KFGbMGg;7Cq`#$j!MJokB|Ef{1KKd7p3P1~ie(WX7`9SlP+>rZ~#W^%C z>f?Xb#0!8H0sTy)g}Sj%HChC;6zJbIS`4%t=-)JdS^{(u(9gZ%4nazX?4Kl+`dC1s z*;dLf`jqAJ&?2E!s!&`tE`QCwSz@6gig}_-%#pZM#wWFvi-lskN0LevmsH^^Z}A>u z9#|<^{K6}gkF_X;Kr4VMip!_5Nx-H7D0bu6>YbG`a z*m=Mjh|L9dKCnh&^MG9lYyq)aU}0cQ#O4Ek>jKt7EC{R{*g|4;`W*6zg>;rntR7eo zu<67afb9fUPOK5wE?|?0HR*aDV$C{p6I-Cqy})@KaB!hMm`|)lU)~ZDT?DLO%WfPz z185uuD~O#5Y&WnFJbTP4Izy^WMB(@yb9$=>tI~&+OU{i^$0Cq93Dd55@tmN+2%NWo)qN{*jj`5`!$?nu!qSm{wXigPrNYvYSy@YH2b^rPI*Fxi#seL&LKgtJpDC+G*x9 zTBcf-0ee78>V|6(e(sjb1FthMdm)(k22)P55%w%g8|F{3QT90Fk20~2oZQRgB%if! 
zhGnTYOBbLWE^RBec-bOdWU8dbsS-9-2FIz=$%{u>ftWAiQgK&J&ID3ob4E&l^44)H zZ*}I=V=iI4kx#|a4%lpgRkGeY+G6_+UH?<3455=d28- z^B}WHbOvU^E_HGoP?Q$g63TXI2N$>}DnC0|Q#_xpAr?O-`3OX}h&B%CF&v=(5<3F=x z{6~=S@0%I_E@b@MjK9IeT2TOrCzyOSonm#%OdGEpU3Y!7{oW zM*FnUmVFCsk;E-gbV~yk*6Um@GSfdEGYOlSq*Ga1fsXeE4+0Cg6BarsE^I!rqKCqoh}tuQbff_K@P1?q-3NpnD0(zrd6;M_3E#o@DYpj9Sk#`Ci6njI!#T(=2v|q_4x> zfpiaq9dZdd0k?F2uNK9npz~u(6wfjFeu(0oQ$}$=3#K6q%kUJuZn5>MVM~i>=6e=fnGun#`&sA$bKj4%nYf#d?cST- zZY90lE#sDK+XKG564Y)Wa96tp7S-bCsH@Dj;wT5fq_qQo#^umUN&FCNgOAmENukSv z^bp(lFbla`e+v6*&b2LH8unFCw|H5uuzzB?xd@i`ATIA4mTi7nOSTcqHfL@SB-?yY zz5J4~q^VROthcn$Nm3oI$BfkfN%{nQm{n2mKk?s`0NlCu4VeN29C4Y6pP$8Ni2qmdnuS}m~<_3 zF6;zkMcBFPSk_C!T+5{ab7KT^<6X#&Ly#LVAm+$_RlItHO(m%+xOXW(&Pu^(72e9E zJygMMqaa+5y1!sb`5{(n=%yt?XBf$I`o7tY3ruBO1#Oi!h3i=673K-IaDzF6CYnKz zlJ{3kenzW%k1+Y$TI)AV{*KoAEt9{?j2U)9ls96Ozh}yp5w^*=+gjDK!VdlWtB?Pt-4aZiUnZkmvf=PW*cXz_8<;^X-YA2(0P#|su8FIs&3 z*y7_QtB(on*9}?rD=gfSJ>guIr{?jyxFw5=8=1hfx{xh7Jr}OJWw20xV2Y8&vYExQ zHH+m5+ywE=c>d%Oy0bn=JZgQ6_d*?lksN=2jnt#)heZFZhJH?$q$>F*j2g4$S6E<+ z{_OD)Q`U^Il{6sBuQJ0pZy05D%6ucZZrJwfmT8u7yDbkNpe4c0LV4h%<>})Yc=~wK zeEN70o<8nl9BwGI`~43xpC-C9?)TwN+Z1;`O=J^KnBvZ-iEQGjjHii3Fo6MD1SQyi zQY%(i=G()y=f+1E&&Ov{!3AroC;Dp)mlc&0<5YH=%C8&Bsc|ajn#w;#*~(wD0_$5{;>qRDNQmP^p>4Ad!X zxEL5`>k{dgY(g4=37P-0Yw|%R{R*e{uZuv?z9#!MYjd~$jjb*jWw?*nh9DlsAk0nd z6z*hMYsKH$SE`S9W~q<&G2w9b8eCy1kY_@g+=c1z52nXA@7F;?9bnO?)t; z;?IK#?50If{OvwV-R+g4M%|Tv-e-#T)Q(>>BsKBr@PIIe_y-Jso|ON{NPcj<@~=;% zlJ{n04bNK=8(tTWaBny^99k7iaW=%g(O4=3{X7pE?dkw8YK!))AM95X(JquN3P&Pl z4=;*D_gHEFV4sDS4J6dAXfhg)Svm_7@xfRm8tVabDlbWPh5J;?aCx$SH`<+QkM{JY ztcH_=omRuCxXIF;EQb9$vj|J6c!xSWwD3un`H3 z#bYWL+Xkb3sb~y41eiLkQd0vLp=Edt^D0@m)nS!1~BA(!aIy9hZ6hNyF z0wTk?-SNIipZ1a9g*|GlKRmEZ?Nd`KFVNWX{sCB3eP~rIsU{$QHjS-OAr^gPZ9Kkf za3DRhDY{Qhk8BC|4XR1*?MbLGiv-Wt8_Rd|f@r@POK}wAGZ(f%=QQZ)gZS0#)eq3rP%1y6p9?XM(LvyExFU0VzM_JYHZR*D6i8a0_3yq9$IFm zf@DX0sj=<6K!MwWcv6Loj3jw3W~B;K8Xi!gj&uw|_xf-qC_*)AIjlN1a+k*YATQnRDzsOBBvduVJ#83X0S%~;4(Q{a zwQ9JVd)8x#$?YNy$b@c4+yqt@&ewwjHG)PvRLCU^q$o}YINYasDB5771zi^N-t}>) z7O@^|Xv6uN;{B@DtmvXV)x$}jRn7)@NqR)` zb>RVW+YWn>h+xyRF&s^BaZ5N64aZX0leavY>V<NFlo^N_~t)4ZMwl=A4c1X^mM8v-D(>d#Y>5#b+ns;LXVo#PAH3VPg3oJNAz{Wa9UD49|F*&3Nfw? 
zov>1WdtiNwkZppiLpT*r43nI0Ur2|`i>$K?@;va-rS@?zo?6Tyv;A>_?5+zB@!1Q<^;dgp!CJF9 zhG1<*4+v&c{=h-~DgLaA;-=wPJT^>~*L(J0tP3)dGpMZHaJqS^Rx$PdWl^XFC9!zQ zhgWB{Uxf^f@PhQYq^#ViCX!Ht@&>d|_j<6ByQxow^U4Ofh?f<;5_zeO1)p|>=O>|m z!1_{jDK)MSB#4KJj`&(C@O0~l^RF}hc(EG8d#S{Z&S)$QM=BRqr#7g#MV5oW5ZnkJ zgj|F?1TTVskdGiD6d)8L6d@ENNC>3}9H9(h5<(?{j4&BNL70MY8p2eBX$U?9Kf-i` z83;2GW+BW*s6hxI%t4roFb|;?VLk$X&8Q9`giw#rfY6B0gwTwz0AV3Q3&J9VGZ4;1 zSd4HMLMy@&gf@hw2+I(bBb<$}0%0Y>DumStYY^5VtV39jumNEs!Z`@-2%8W(5Y9z7 z4`D09HiYvLE>_r$x*oUwm;Sz*@OVPq* z4DoWpgM?QQUP*Wr;WdQU5?)6*LU=vl4TLum-b8pa;Vp!>5*{MFjqnb_I|&aH-bHwn z@N0zk5k5fp4Z?2{K0^2?;W5I;2p=bWg78~}PZB;w_%z{h!tW8DAp8;G%Y<(bzDf8q z!k-hqOZXeY_XvMW_{30OF#=Io*i7A4X1)D9=roe1t-T=fYmrzXjs*~>#B7fQ9utznS zqN*~Ysxn6L-P$QOcACX6zK1)Ntef-jk>50{$Y6e3gm&9A9JXgtT-8Qgvu)Bf2E$ir zPY+D;m#200Pkf0tK-!{h&T}a$>=$UZ>6U39U!0|SZ3SfD#z&Ah6j5tthB{tu0Fmfz1rs8KDO3iYvA9yG4^S_ zO=q^v*G7Y#a|#RXBvp_RUkCoP+1osZg)L+Bci3XvY_qj>44rK@|9?xykh;LgPJ6uD zZ9aC4p?#4}hm9fCY3!%nM`R3n)sR|g%K?^A0eT%3AZnAXu*v2*WczHR%rP3XjaE2D z2aHiUBSL4zOWJaHu8o;jn<=*by*BSd4vWKvmTc47PyL0qlX(gLvq@d*IBf?U{1V&F zUm7S<3Upm}>p>PM@qZT?!tT~9m=Q~rA&D=FUBzsQS34E2VYU#R!`HC@H-2f^tiL$1 zC6KRpA&=ax*E5^58=UGlY8Nq&E`>ja&_3BHvrV+h7Vjaa#oJ72EWIJymNy!@jEo10 zx%KVhEdkg$N8fo4ey)RWbMR7{JN`a)52I!&y<8nARtjlG^nd%)*Ue>-f!=3I9!*7GR&uo>k^)GnH&_PuFe%Wg{w zzi+1S2aas|qut1dAgTXi`Bu6)8=GmRKcUjUnM(LSdj1_wnZF|AYUytM4_KXmNN;NW zC%%V3l`lU~(Nbpo*nTUM4x!19&Gfp8(M8_s;nT4^{DlQ1?)d56nD%GPxa!!K^00F$ zKew-4uYYCj{>xmuFB~WHzwK-HKg{1+->@K3-(0`2JGh`ZxS(-iU1w8sT~p(NP*Z0x z*rcirjh&5=V0UvvXQaCU1iG6SHgzuS3@!|H);9!$b^i}gO9u##bpsw36#xJVIsgDr zO928D02BZS2nYZ$8E9Bq0M8>*0001N0000K0Fw(AFbyynXjqSR10EL@000V;NER^$ J", - | "iv": "", - | "key_length" : 123 - |} - |""".stripMargin - val expected = DecryptionParameters( - transformation = "algorithm1/mode2/padding3", - keyInHex = "", - initializationVectorInHex = "", - ) - // when - val actual = DecryptionParameters.parsePayload(jsonPayload) - - // then - actual shouldBe expected - } - - "fetch JSON document from a secrets URL" in { - // given - val expected = "decryption-params123" - val secretsUrl: SecretsUrl = () => new ByteArrayInputStream(expected.getBytes) - - // when - val actual = DecryptionParameters.fetchPayload(secretsUrl) - - // then - actual shouldBe expected - } - } - -} diff --git a/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrlTest.scala b/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrlTest.scala deleted file mode 100644 index 634f77b85..000000000 --- a/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/SecretsUrlTest.scala +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.ledger.api.tls - -import com.daml.http.test.SimpleHttpServer -import com.digitalasset.canton.ledger.api.tls.SecretsUrlTest.* -import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec - -import java.io.{BufferedReader, InputStream, InputStreamReader} -import java.net.URL -import java.nio.file.Files -import java.util.stream.Collectors -import scala.util.Using - -class SecretsUrlTest extends AnyWordSpec with Matchers { - "a secrets URL based on a file" should { - "open a stream" in { - val contents = "Here is some text." 
- val filePath = Files.createTempFile(getClass.getSimpleName, ".txt") - try { - Files.write(filePath, contents.getBytes) - - val secretsUrl = SecretsUrl.fromPath(filePath) - val actualContents = readStreamFully(secretsUrl.openStream()) - - actualContents should be(contents) - } finally { - Files.delete(filePath) - } - } - } - - "a secrets URL based on a URL" should { - "open a stream" in { - val contents = "Here is a response body." - val server = SimpleHttpServer.start(contents) - try { - val url = new URL(SimpleHttpServer.responseUrl(server)) - url.getProtocol should be("http") - - val secretsUrl = SecretsUrl.fromUrl(url) - val actualContents = readStreamFully(secretsUrl.openStream()) - - actualContents should be(contents) - } finally { - SimpleHttpServer.stop(server) - } - } - } -} - -object SecretsUrlTest { - private def readStreamFully(newStream: => InputStream): String = - Using.resource(newStream) { stream => - new BufferedReader(new InputStreamReader(stream)) - .lines() - .collect(Collectors.joining(System.lineSeparator())) - } -} diff --git a/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/TlsConfigurationTest.scala b/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/TlsConfigurationTest.scala index 44d28fac2..6762480be 100644 --- a/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/TlsConfigurationTest.scala +++ b/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/api/tls/TlsConfigurationTest.scala @@ -13,7 +13,6 @@ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec import java.io.InputStream -import java.net.ConnectException import java.nio.charset.StandardCharsets import java.nio.file.Files import java.security.Security @@ -109,44 +108,6 @@ class TlsConfigurationTest extends AnyWordSpec with Matchers with BeforeAndAfter IOUtils.toString(actual, StandardCharsets.UTF_8) shouldBe "private-key-123" } - "fail on missing secretsUrl when private key is encrypted ('.enc' file extension)" in { - // given - val keyFilePath = Files.createTempFile("private-key", ".enc") - Files.write(keyFilePath, "private-key-123".getBytes()) - assume(Files.readAllBytes(keyFilePath) sameElements "private-key-123".getBytes) - val keyFile = keyFilePath.toFile - val tested = TlsConfiguration.Empty - - // when - val e = intercept[PrivateKeyDecryptionException] { - val _: InputStream = tested.prepareKeyInputStream(keyFile) - } - - // then - e.getCause shouldBe a[IllegalStateException] - e.getCause.getMessage should endWith("cannot decrypt keyFile without secretsUrl.") - } - - "attempt to decrypt private key using by fetching decryption params from an url" in { - // given - val keyFilePath = Files.createTempFile("private-key", ".enc") - Files.write(keyFilePath, "private-key-123".getBytes()) - assume(Files.readAllBytes(keyFilePath) sameElements "private-key-123".getBytes) - val keyFile = keyFilePath.toFile - val tested = TlsConfiguration.Empty.copy( - secretsUrl = Some(() => throw new ConnectException("Mocked url 123")) - ) - - // when - val e = intercept[PrivateKeyDecryptionException] { - val _: InputStream = tested.prepareKeyInputStream(keyFile) - } - - // then We are not interested in decryption details (as that part is tested elsewhere). 
- // We only want to verify that the decryption code path was hit (as opposed to the no-decryption code path when private key is in plaintext) - e.getCause shouldBe a[ConnectException] - e.getCause.getMessage shouldBe "Mocked url 123" - } } private def configWithProtocols(minTls: Option[TlsVersion]): Option[TlsConfiguration] = { diff --git a/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/subpackage/MildErrorsParent.scala b/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/subpackage/MildErrorsParent.scala index 93ce757d9..bb2176573 100644 --- a/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/subpackage/MildErrorsParent.scala +++ b/community/ledger/ledger-common/src/test/scala/com/digitalasset/canton/ledger/error/testpackage/subpackage/MildErrorsParent.scala @@ -29,12 +29,8 @@ object MildErrorsParent extends ErrorGroup()(ErrorClass.root()) { override def cause: String = "Some obscure cause" - override def resources: Seq[(ErrorResource, String)] = Seq( - (ErrorResource.LedgerId, LedgerIdResource) - ) + override def resources: Seq[(ErrorResource, String)] = Seq.empty } - - private[error] val LedgerIdResource = "some ledger id" } } diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala index be4f5f9c1..fb08e909b 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/fetchcontracts/util/IdentifierConverters.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.fetchcontracts.util import com.daml.lf -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import com.digitalasset.canton.http.domain.ContractTypeId object IdentifierConverters { diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/domain.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/domain.scala index a5df93be7..5af044dad 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/domain.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/domain.scala @@ -48,10 +48,6 @@ package object domain { type ResolvedContractRef[LfV] = (ContractTypeId.Template.RequiredPkg, LfV) \/ (ContractTypeId.RequiredPkg, ContractId) - type LedgerIdTag = lar.LedgerIdTag - type LedgerId = lar.LedgerId - val LedgerId = lar.LedgerId - type ApplicationIdTag = lar.ApplicationIdTag type ApplicationId = lar.ApplicationId val ApplicationId = lar.ApplicationId diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/CreateAndExercise.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/CreateAndExercise.scala index ae8cce961..3040c8ef6 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/CreateAndExercise.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/CreateAndExercise.scala @@ -18,7 +18,7 @@ import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT} import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} 
import com.digitalasset.canton.http.util.JwtParties.* import com.daml.jwt.domain.Jwt -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import lav2.value.{Record as ApiRecord, Value as ApiValue} import scalaz.std.scalaFuture.* import scalaz.{-\/, EitherT, \/, \/-} diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/RouteSetup.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/RouteSetup.scala index d709d1b43..2e8a6d801 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/RouteSetup.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/endpoints/RouteSetup.scala @@ -24,7 +24,7 @@ import com.digitalasset.canton.http.Endpoints import com.digitalasset.canton.http.util.FutureUtil.{either, eitherT} import com.digitalasset.canton.http.util.Logging.{InstanceUUID, RequestID} import com.daml.jwt.domain.Jwt -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import lav2.value.Value as ApiValue import scalaz.std.scalaFuture.* import scalaz.syntax.std.option.* diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonDecoder.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonDecoder.scala index 56319e0af..be635b604 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonDecoder.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonDecoder.scala @@ -8,7 +8,7 @@ import JsValueToApiValueConverter.mustBeApiRecord import com.digitalasset.canton.http.util.FutureUtil.either import com.digitalasset.canton.http.util.Logging.InstanceUUID import com.daml.jwt.domain.Jwt -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import com.daml.logging.LoggingContextOf import com.digitalasset.canton.http.domain.{ContractTypeId, HasTemplateId} import com.digitalasset.canton.http.{PackageService, domain} diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonEncoder.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonEncoder.scala index 95dab7b57..ff3916794 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonEncoder.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/DomainJsonEncoder.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.http.json import com.digitalasset.canton.http.domain -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import scalaz.\/ import scalaz.syntax.bitraverse.* import scalaz.syntax.show.* diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala index 479031136..7fd15a302 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/json/JsonProtocol.scala @@ -22,8 +22,6 @@ import com.digitalasset.canton.daml.lf.value.json.ApiCodecCompressed object JsonProtocol extends JsonProtocolLow { - implicit val LedgerIdFormat: JsonFormat[lar.LedgerId] = 
taggedJsonFormat[String, lar.LedgerIdTag] - implicit val ApplicationIdFormat: JsonFormat[lar.ApplicationId] = taggedJsonFormat[String, lar.ApplicationIdTag] diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala index 4009f0bf5..f84ec7188 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/ClientUtil.scala @@ -8,7 +8,7 @@ import java.util.UUID import com.digitalasset.canton.ledger.api.refinements.ApiTypes.{CommandId, Party} import com.daml.ledger.api.v2.transaction_filter.{Filters} import com.daml.ledger.api.v2.transaction_filter.{TransactionFilter} -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 object ClientUtil { def uniqueId(): String = UUID.randomUUID.toString diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala index 609263530..10bfbb8a4 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/Commands.scala @@ -4,7 +4,6 @@ package com.digitalasset.canton.http.util import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.daml.ledger.api.{v2 as lav2} import com.daml.ledger.api.v2 as lav2 import com.digitalasset.canton.http.domain import com.digitalasset.canton.topology.DomainId diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala index 8c506818d..353235239 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/http/util/IdentifierConverters.scala @@ -5,7 +5,7 @@ package com.digitalasset.canton.http.util import com.daml.lf import com.digitalasset.canton.ledger.api.refinements.ApiTypes as lar -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import com.digitalasset.canton.fetchcontracts.util.IdentifierConverters as FC import com.digitalasset.canton.http diff --git a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/pureconfigutils/SharedConfigReaders.scala b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/pureconfigutils/SharedConfigReaders.scala index 78d6ac57d..b15d04071 100644 --- a/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/pureconfigutils/SharedConfigReaders.scala +++ b/community/ledger/ledger-json-api/src/main/scala/com/digitalasset/canton/pureconfigutils/SharedConfigReaders.scala @@ -3,17 +3,9 @@ package com.digitalasset.canton.pureconfigutils -import org.apache.pekko.http.scaladsl.model.Uri -import com.auth0.jwt.algorithms.Algorithm -import com.daml.jwt.{ECDSAVerifier, HMAC256Verifier, JwksVerifier, JwtVerifierBase, RSA256Verifier} -import pureconfig.error.{CannotConvert, ConvertFailure, FailureReason} -import pureconfig.{ConfigObjectCursor, ConfigReader, ConvertHelpers} -import com.daml.jwt.{Error as 
JwtError} -import scalaz.\/ -import scalaz.syntax.std.option.* +import pureconfig.error.{CannotConvert, FailureReason} import java.nio.file.Path -import com.daml.metrics.api.reporters.MetricsReporter final case class HttpServerConfig( address: String = com.digitalasset.canton.cliopts.Http.defaultAddress, @@ -21,47 +13,6 @@ final case class HttpServerConfig( portFile: Option[Path] = None, ) -object TokenVerifierConfig { - private val knownTokenVerifiers: Map[String, String => JwtError \/ JwtVerifierBase] = - Map( - "rs256-crt" -> (RSA256Verifier.fromCrtFile(_)), - "es256-crt" -> (ECDSAVerifier - .fromCrtFile(_, Algorithm.ECDSA256(_, null))), - "es512-crt" -> (ECDSAVerifier - .fromCrtFile(_, Algorithm.ECDSA512(_, null))), - "rs256-jwks" -> (valueStr => - \/.attempt(JwksVerifier(valueStr))(e => JwtError(Symbol("RS256"), e.getMessage)) - ), - ) - private val unsafeTokenVerifier: (String, String => JwtError \/ JwtVerifierBase) = - "hs256-unsafe" -> (HMAC256Verifier(_)) - - def extractByType( - typeStr: String, - valueStr: String, - objectCursor: ConfigObjectCursor, - ): ConfigReader.Result[JwtVerifierBase] = { - def convertFailure(msg: String) = { - ConfigReader.Result.fail( - ConvertFailure( - CannotConvert(typeStr, "JwtVerifier", msg), - objectCursor, - ) - ) - } - (knownTokenVerifiers + unsafeTokenVerifier) - .get(typeStr) - .cata( - { conv => - conv(valueStr).fold( - err => convertFailure(s"Failed to create $typeStr verifier: $err"), - (Right(_)), - ) - }, - convertFailure(s"value not one of ${knownTokenVerifiers.keys.mkString(", ")}"), - ) - } -} object SharedConfigReaders { def catchConvertError[A, B](f: String => Either[String, B])(implicit @@ -69,24 +20,4 @@ object SharedConfigReaders { ): String => Either[FailureReason, B] = s => f(s).left.map(CannotConvert(s, B.toString, _)) - implicit val tokenVerifierCfgRead: ConfigReader[JwtVerifierBase] = - ConfigReader.fromCursor { cur => - for { - objCur <- cur.asObjectCursor - typeCur <- objCur.atKey("type") - typeStr <- typeCur.asString - valueCur <- objCur.atKey("uri") - valueStr <- valueCur.asString - ident <- TokenVerifierConfig.extractByType(typeStr, valueStr, objCur) - } yield ident - } - - implicit val uriCfgReader: ConfigReader[Uri] = - ConfigReader.fromString[Uri](ConvertHelpers.catchReadError(s => Uri(s))) - - implicit val metricReporterReader: ConfigReader[MetricsReporter] = { - ConfigReader.fromString[MetricsReporter](ConvertHelpers.catchReadError { s => - MetricsReporter.parseMetricsReporter(s.toLowerCase()) - }) - } } diff --git a/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml b/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml index 91b8c75b7..13c78c808 100644 --- a/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml +++ b/community/ledger/ledger-json-api/src/test/daml/v2_1/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: JsonEncodingTest diff --git a/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml b/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml index dcc49cd82..c35760a3b 100644 --- a/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml +++ b/community/ledger/ledger-json-api/src/test/daml/v2_dev/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.dev name: JsonEncodingTestDev diff --git 
a/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala b/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala index 4a16721a1..bb24df98f 100644 --- a/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala +++ b/community/ledger/ledger-json-api/src/test/scala/com/digitalasset/canton/http/PackageServiceTest.scala @@ -9,7 +9,7 @@ import com.digitalasset.canton.http.Generators.{ nonEmptySetOf, } import com.digitalasset.canton.http.PackageService.TemplateIdMap -import com.daml.ledger.api.{v2 as lav2} +import com.daml.ledger.api.v2 as lav2 import org.scalacheck.Shrink import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks import org.scalatest.Inside diff --git a/community/lib/wartremover/src/main/scala/com/digitalasset/canton/RequireBlocking.scala b/community/lib/wartremover/src/main/scala/com/digitalasset/canton/RequireBlocking.scala index e43f71ab5..10c717ae2 100644 --- a/community/lib/wartremover/src/main/scala/com/digitalasset/canton/RequireBlocking.scala +++ b/community/lib/wartremover/src/main/scala/com/digitalasset/canton/RequireBlocking.scala @@ -15,7 +15,7 @@ import org.wartremover.{WartTraverser, WartUniverse} */ object RequireBlocking extends WartTraverser { - val messageSynchronized: String = "synchronized calls must be surrounded by blocking" + val messageSynchronized: String = "synchronized blocks must be surrounded by blocking" val messageThreadSleep: String = "Use Threading.sleep instead of Thread.sleep" def apply(u: WartUniverse): u.Traverser = { @@ -38,12 +38,12 @@ object RequireBlocking extends WartTraverser { Apply(TypeApply(Select(receiver, `synchronizedName`), _tyarg2), List(body)) ), ) if blocking.symbol.fullName == blockingFullName => - // Look for further synchronized calls in the receiver and the body + // Look for further synchronized blocks in the receiver and the body // Even if they are Syntactically inside a `blocking` call, // we still want to be conservative as further synchronized calls may escape the blocking call // due to lazy evaluation. // - // This heuristics will give false positives on immediately nested synchronized calls. + // This heuristics will give false positives on immediately nested synchronized blocks. traverse(receiver) traverse(body) case Select(_receiver, synchronizedN) if synchronizedN.toTermName == synchronizedName => diff --git a/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SynchronizedFuture.scala b/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SynchronizedFuture.scala new file mode 100644 index 000000000..6435c4630 --- /dev/null +++ b/community/lib/wartremover/src/main/scala/com/digitalasset/canton/SynchronizedFuture.scala @@ -0,0 +1,99 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. + +package com.digitalasset.canton + +import org.wartremover.{WartTraverser, WartUniverse} + +import scala.annotation.StaticAnnotation + +/** When a `synchronized` block returns a [[scala.concurrent.Future]], then the synchronization typically + * does not extend to the computations performed inside the future. + * This often hides a concurrency bug that is hard to spot during review + * because the computation's code is lexically inside the synchronized block + * and it may not be obvious that [[scala.concurrent.Future]]s are in play. 
+ * + * For example, `synchronized` blocks are often used to read and update the state of an object + * atomically. When the update operation involves a [[scala.concurrent.Future]] like below, + * then the write to the state is actually not guarded by the synchronized call and the update is not atomic. + * + *
+  *   synchronized {
+  *     val x = this.atomicRef.get()
+  *     futureComputation(x).map { y =>
+  *       this.atomicRef.set(y)
+  *       y
+  *     }
+  *   }
+  * 
+ * + * The proper approach in the above example is to use a semaphore instead of a `synchronized` block. + * + *
+  *   blocking { this.semaphore.acquire() }
+  *   val x = this.atomicRef.get()
+  *   futureComputation(x).map { y =>
+  *     this.atomicRef.set(y)
+  *     y
+  *   }.thereafter { _ => this.semaphore.release() }
+  * 
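+  * A note on the sketch above: the `thereafter` call is assumed to come from Canton's
+  * `Thereafter` syntax, e.g.
+  *
+  *   import com.digitalasset.canton.util.Thereafter.syntax.*
+  *
+  * so that the semaphore is released whether the future completes successfully or fails.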
+ * + * If the synchronization intentionally does not have to extend over the [[scala.concurrent.Future]] computation, + * it usually helps with readability to move the future out of the synchronized block. For example, + * + *
+  *   blocking(synchronized {
+  *     val x = criticalSection()
+  *     Future { nonCriticalSection(x) }
+  *   })
+  * 
+ * + * should be written as follows: + * + *
+  *   val x = blocking(synchronized { criticalSection() })
+  *   Future { nonCriticalSection(x) }
+  * 
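+  * As a rough illustration of what this wart reports (mirroring the accompanying tests),
+  * any `synchronized` block whose result type dealiases to a future-like type is flagged,
+  * including futures wrapped in transformers such as `EitherT` or `OptionT`:
+  *
+  *   synchronized { Future.unit }                                   // flagged
+  *   synchronized { EitherT(Future.successful(Either.right(()))) }  // flagged
+  *   synchronized { 42 }                                            // not flagged
+  *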
+ * + * There are cases where the `synchronized` block is supposed to return a [[scala.concurrent.Future]], + * for example when dealing with the promise of a future. In such a case, the warning should simply be suppressed locally. + */ +object SynchronizedFuture extends WartTraverser { + + val messageSynchronized: String = "synchronized blocks must not return a Future" + + def apply(u: WartUniverse): u.Traverser = { + import u.universe.* + + val synchronizedName = TermName("synchronized") + + val futureLikeType = typeOf[DoNotReturnFromSynchronizedLikeFuture] + val futureLikeTester = FutureLikeTester.tester(u)(futureLikeType) + + new Traverser { + + override def traverse(tree: Tree): Unit = { + tree match { + // Ignore trees marked by SuppressWarnings + case t if hasWartAnnotation(u)(t) => + case Apply(TypeApply(Select(receiver, `synchronizedName`), _tyarg2), List(body)) => + val tpe = tree.tpe + if (tpe != null && futureLikeTester(tpe.dealias)) { + error(u)(tree.pos, messageSynchronized) + } else { + // Keep looking for other instances in the receiver and the body + traverse(receiver) + traverse(body) + } + case _ => + super.traverse(tree) + } + } + } + } +} + +/** Annotated type constructors will be treated like a [[scala.concurrent.Future]] + * when looking at the return types of synchronized blocks. + */ +final class DoNotReturnFromSynchronizedLikeFuture extends StaticAnnotation diff --git a/community/lib/wartremover/src/test/scala/com/digitalasset/canton/RequireBlockingTest.scala b/community/lib/wartremover/src/test/scala/com/digitalasset/canton/RequireBlockingTest.scala index 090dfa7d9..69b63958b 100644 --- a/community/lib/wartremover/src/test/scala/com/digitalasset/canton/RequireBlockingTest.scala +++ b/community/lib/wartremover/src/test/scala/com/digitalasset/canton/RequireBlockingTest.scala @@ -54,7 +54,7 @@ class RequireBlockingTest extends AnyWordSpec with Matchers { result.errors.foreach { _ should include(RequireBlocking.messageSynchronized) } } - "detect nested synchronized calls in the body" in { + "detect nested synchronized blocks in the body" in { // Technically we shouldn't require another blocking around the inner synchronized, // but that's a false positive we can live with as nested synchronization calls are anyway // dangerous for their deadlock potential. @@ -64,7 +64,7 @@ class RequireBlockingTest extends AnyWordSpec with Matchers { assertIsErrorSynchronized(result) } - "detect escaping synchronized calls in the body" in { + "detect escaping synchronized blocks in the body" in { // The inner synchronize call escapes the blocking scope val result = WartTestTraverser(RequireBlocking) { val f = blocking { diff --git a/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SynchronizedFutureTest.scala b/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SynchronizedFutureTest.scala new file mode 100644 index 000000000..89c7c1bac --- /dev/null +++ b/community/lib/wartremover/src/test/scala/com/digitalasset/canton/SynchronizedFutureTest.scala @@ -0,0 +1,105 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. +// Proprietary code. All rights reserved. 
+ +package com.digitalasset.canton + +import cats.data.{EitherT, OptionT} +import cats.syntax.either.* +import org.scalatest.Assertion +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec +import org.wartremover.test.WartTestTraverser + +import scala.concurrent.{Future, Promise} + +class SynchronizedFutureTest extends AnyWordSpec with Matchers { + import SynchronizedFutureTest.* + + def assertIsErrorSynchronized(result: WartTestTraverser.Result): Assertion = { + result.errors.length shouldBe 1 + result.errors.foreach { _ should include(SynchronizedFuture.messageSynchronized) } + succeed + } + + "SynchronizedFuture" should { + "detect qualified synchronized statements" in { + val result = WartTestTraverser(SynchronizedFuture) { + this.synchronized { Future.unit } + } + assertIsErrorSynchronized(result) + } + + "work without qualifier" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { Future.unit } + } + assertIsErrorSynchronized(result) + } + + "detect nested synchronized statements in the body" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { + synchronized { + Future.unit + } + } + } + assertIsErrorSynchronized(result) + } + + "detect nested synchronized statements in the receiver" in { + val result = WartTestTraverser(SynchronizedFuture) { + { synchronized { Future.unit } }.synchronized { 23 } + } + assertIsErrorSynchronized(result) + } + + "allow non-future types in synchronized blocks" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { 42 } + } + result.errors.length shouldBe 0 + } + + "report a false positive when futures are data" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { + Promise[Unit]().future + } + } + assertIsErrorSynchronized(result) + } + + "detects futures wrapped in an EitherT" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { EitherT(Future.successful(Either.right(()))) } + } + assertIsErrorSynchronized(result) + } + + "detects futures wrapped in an OptionT" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { OptionT(Future.successful(Option(()))) } + } + assertIsErrorSynchronized(result) + } + + "detects futures that are deeply wrapped" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { OptionT(EitherT(Future.successful(Either.right(Option(()))))) } + } + assertIsErrorSynchronized(result) + } + + "detect future-like types" in { + val result = WartTestTraverser(SynchronizedFuture) { + synchronized { new LooksLikeAFuture() } + } + assertIsErrorSynchronized(result) + } + } +} + +object SynchronizedFutureTest { + @DoNotReturnFromSynchronizedLikeFuture class LooksLikeAFuture +} diff --git a/community/participant/src/main/daml/daml.yaml b/community/participant/src/main/daml/daml.yaml index ddc29e361..9996dca44 100644 --- a/community/participant/src/main/daml/daml.yaml +++ b/community/participant/src/main/daml/daml.yaml @@ -1,4 +1,4 @@ -sdk-version: 3.0.0-snapshot.20240312.12878.0.v540a7460 +sdk-version: 3.0.0-snapshot.20240318.12913.0.v1c415c97 build-options: - --target=2.1 name: AdminWorkflows diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeCommon.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeCommon.scala index 5b48fba9c..12effd4cd 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeCommon.scala +++ 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeCommon.scala @@ -27,7 +27,6 @@ import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.metrics.Metrics as LedgerApiServerMetrics import com.digitalasset.canton.participant.admin.grpc.* import com.digitalasset.canton.participant.admin.{ - DomainConnectivityService, MutablePackageNameMapResolver, PackageDependencyResolver, PackageOps, @@ -43,6 +42,7 @@ import com.digitalasset.canton.participant.ledger.api.CantonLedgerApiServerWrapp } import com.digitalasset.canton.participant.ledger.api.* import com.digitalasset.canton.participant.metrics.ParticipantMetrics +import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor import com.digitalasset.canton.participant.scheduler.{ ParticipantSchedulersParameters, SchedulersWithParticipantPruning, @@ -83,7 +83,6 @@ class CantonLedgerApiServerFactory( ) extends NamedLogging { def create( name: InstanceName, - ledgerId: String, participantId: LedgerParticipantId, sync: CantonSyncService, participantNodePersistentState: Eval[ParticipantNodePersistentState], @@ -141,7 +140,6 @@ class CantonLedgerApiServerFactory( jsonApiConfig = config.httpLedgerApiExperimental.map(_.toConfig), indexerConfig = parameters.ledgerApiServerParameters.indexer, indexerHaConfig = indexerHaConfig, - ledgerId = ledgerId, participantId = participantId, engine = engine, syncService = sync, @@ -232,6 +230,12 @@ trait ParticipantNodeBootstrapCommon { timeouts, ) + lazy val syncDomainAcsCommitmentProcessorHealth: MutableHealthComponent = + MutableHealthComponent( + loggerFactory, + AcsCommitmentProcessor.healthName, + timeouts, + ) protected def setPostInitCallbacks(sync: CantonSyncService): Unit protected def createParticipantServices( @@ -399,8 +403,6 @@ trait ParticipantNodeBootstrapCommon { ) .mapK(FutureUnlessShutdown.outcomeK) - ledgerId = participantId.uid.id.unwrap - resourceManagementService = resourceManagementServiceFactory( persistentState.map(_.settingsStore) ) @@ -455,12 +457,12 @@ trait ParticipantNodeBootstrapCommon { syncDomainHealth.set(sync.syncDomainHealth) syncDomainEphemeralHealth.set(sync.ephemeralHealth) syncDomainSequencerClientHealth.set(sync.sequencerClientHealth) + syncDomainAcsCommitmentProcessorHealth.set(sync.acsCommitmentProcessorHealth) } ledgerApiServer <- ledgerApiServerFactory .create( name, - ledgerId = ledgerId, participantId = participantId.toLf, sync = sync, participantNodePersistentState = persistentState, @@ -495,14 +497,6 @@ trait ParticipantNodeBootstrapCommon { ledgerApiDependentServices, ) - val stateService = new DomainConnectivityService( - sync, - domainAliasManager, - parameterConfig.processingTimeouts, - sequencerInfoLoader, - loggerFactory, - ) - adminServerRegistry .addServiceU( TrafficControlServiceGrpc.bindService( @@ -520,7 +514,16 @@ trait ParticipantNodeBootstrapCommon { adminServerRegistry .addServiceU( DomainConnectivityServiceGrpc - .bindService(new GrpcDomainConnectivityService(stateService), executionContext) + .bindService( + new GrpcDomainConnectivityService( + sync, + domainAliasManager, + parameterConfig.processingTimeouts, + sequencerInfoLoader, + loggerFactory, + ), + executionContext, + ) ) adminServerRegistry .addServiceU( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeX.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeX.scala index dfe8b6f7d..63099fb58 100644 --- 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeX.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ParticipantNodeX.scala @@ -83,6 +83,7 @@ class ParticipantNodeBootstrapX( _ => Future.successful(SchedulersWithParticipantPruning.noop), private[canton] val persistentStateFactory: ParticipantNodePersistentStateFactory, ledgerApiServerFactory: CantonLedgerApiServerFactory, + setInitialized: () => Unit, )(implicit executionContext: ExecutionContextIdlenessExecutorService, scheduler: ScheduledExecutorService, @@ -335,6 +336,9 @@ class ParticipantNodeBootstrapX( ) addCloseable(node) Some(new RunningNode(bootstrapStageCallback, node)) + }.map { node => + setInitialized() + node } } } @@ -349,7 +353,12 @@ class ParticipantNodeBootstrapX( criticalDependencies = Seq(storage), // The sync service won't be reporting Ok until the node is initialized, but that shouldn't prevent traffic from // reaching the node - Seq(syncDomainHealth, syncDomainEphemeralHealth, syncDomainSequencerClientHealth), + Seq( + syncDomainHealth, + syncDomainEphemeralHealth, + syncDomainSequencerClientHealth, + syncDomainAcsCommitmentProcessorHealth, + ), ) override protected def setPostInitCallbacks( @@ -387,6 +396,7 @@ object ParticipantNodeBootstrapX { createReplicationServiceFactory(arguments), persistentStateFactory = ParticipantNodePersistentStateFactory, ledgerApiServerFactory = ledgerApiServerFactory, + setInitialized = () => (), ) } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/DomainConnectivityService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/DomainConnectivityService.scala deleted file mode 100644 index af761dead..000000000 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/DomainConnectivityService.scala +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
-// SPDX-License-Identifier: Apache-2.0 - -package com.digitalasset.canton.participant.admin - -import cats.data.EitherT -import cats.syntax.either.* -import cats.syntax.parallel.* -import com.digitalasset.canton.DomainAlias -import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure -import com.digitalasset.canton.admin.participant.v30 -import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader -import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader.SequencerAggregatedInfo -import com.digitalasset.canton.config.ProcessingTimeout -import com.digitalasset.canton.lifecycle.CloseContext -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} -import com.digitalasset.canton.participant.domain.* -import com.digitalasset.canton.participant.sync.CantonSyncService.ConnectDomain -import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceInternalError.DomainIsMissingInternally -import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceUnknownDomain -import com.digitalasset.canton.participant.sync.{CantonSyncService, SyncServiceError} -import com.digitalasset.canton.topology.DomainId -import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import com.digitalasset.canton.util.EitherTUtil -import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.ShowUtil.* -import io.grpc.StatusRuntimeException - -import scala.concurrent.{ExecutionContext, Future} - -class DomainConnectivityService( - sync: CantonSyncService, - aliasManager: DomainAliasManager, - timeouts: ProcessingTimeout, - sequencerInfoLoader: SequencerInfoLoader, - protected val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext) - extends NamedLogging { - - import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* - - private def waitUntilActiveIfSuccess(success: Boolean, domain: DomainAlias)(implicit - traceContext: TraceContext - ): EitherT[Future, StatusRuntimeException, Unit] = - if (success) waitUntilActive(domain) else EitherT.rightT(()) - - private def waitUntilActive( - domain: DomainAlias - )(implicit traceContext: TraceContext): EitherT[Future, StatusRuntimeException, Unit] = { - val clientE = for { - domainId <- aliasManager - .domainIdForAlias(domain) - .toRight(DomainIsMissingInternally(domain, "aliasManager")) - client <- sync.syncCrypto.ips - .forDomain(domainId) - .toRight(DomainIsMissingInternally(domain, "ips")) - } yield client - for { - client <- mapErrNew(clientE) - active <- EitherT - .right(client.await(_.isParticipantActive(sync.participantId), timeouts.network.unwrap)) - .onShutdown(Right(true)) // don't emit ugly warnings on shutdown - _ <- mapErrNew( - Either - .cond( - active, - (), - DomainRegistryError.ConnectionErrors.ParticipantIsNotActive.Error( - s"While domain $domain promised, participant ${sync.participantId} never became active within a reasonable timeframe." 
- ), - ) - ) - } yield () - - } - - def connectDomain(domainAlias: String, keepRetrying: Boolean)(implicit - traceContext: TraceContext - ): Future[v30.ConnectDomainResponse] = - for { - alias <- Future( - DomainAlias - .create(domainAlias) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLoggingStr(err).asGrpcError) - ) - success <- mapErrNewETUS( - sync.connectDomain(alias, keepRetrying, ConnectDomain.Connect) - ) - .valueOr(throw _) - _ <- waitUntilActiveIfSuccess(success, alias).valueOr(throw _) - } yield v30.ConnectDomainResponse(connectedSuccessfully = success) - - def disconnectDomain( - domainAlias: String - )(implicit traceContext: TraceContext): Future[v30.DisconnectDomainResponse] = - for { - alias <- Future( - DomainAlias - .create(domainAlias) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLoggingStr(err).asGrpcError) - ) - _ <- mapErrNewETUS(sync.disconnectDomain(alias)).valueOr(throw _) - } yield v30.DisconnectDomainResponse() - - def listConnectedDomains(): Future[v30.ListConnectedDomainsResponse] = - Future.successful(v30.ListConnectedDomainsResponse(sync.readyDomains.map { - case (alias, (domainId, healthy)) => - new v30.ListConnectedDomainsResponse.Result( - domainAlias = alias.unwrap, - domainId = domainId.toProtoPrimitive, - healthy = healthy, - ) - }.toSeq)) - - def listConfiguredDomains(): Future[v30.ListConfiguredDomainsResponse] = { - val connected = sync.readyDomains - val configuredDomains = sync.configuredDomains - Future.successful( - v30.ListConfiguredDomainsResponse( - results = configuredDomains - .filter(_.status.isActive) - .map(_.config) - .map(cnf => - new v30.ListConfiguredDomainsResponse.Result( - config = Some(cnf.toProtoV30), - connected = connected.contains(cnf.domain), - ) - ) - ) - ) - } - - def registerDomain( - request: v30.DomainConnectionConfig, - handshakeOnly: Boolean, - )(implicit traceContext: TraceContext): Future[v30.RegisterDomainResponse] = { - - val connectDomain = if (handshakeOnly) ConnectDomain.HandshakeOnly else ConnectDomain.Register - - for { - conf <- Future( - DomainConnectionConfig - .fromProtoV30(request) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError) - ) - - _ <- - if (conf.manualConnect && handshakeOnly) - Future.failed( - SyncServiceError.InvalidArgument - .Error("For handshakeOnly to be useful, manualConnect should be set to false") - .asGrpcError - ) - else Future.unit - - _ = logger.info(show"Registering ${request.domainAlias} with ${conf}") - _ <- mapErrNewET(sync.addDomain(conf)).valueOr(throw _) - - _ <- - if (!conf.manualConnect) for { - success <- mapErrNewETUS( - sync.connectDomain( - conf.domain, - keepRetrying = false, - connectDomain = connectDomain, - ) - ) - .valueOr(throw _) - _ <- waitUntilActiveIfSuccess(success, conf.domain).valueOr(throw _) - } yield () - else Future.unit - } yield v30.RegisterDomainResponse() - } - - def modifyDomain( - request: v30.DomainConnectionConfig - )(implicit traceContext: TraceContext): Future[v30.ModifyDomainResponse] = - for { - conf <- Future( - DomainConnectionConfig - .fromProtoV30(request) - .valueOr(err => throw ProtoDeserializationFailure.WrapNoLogging(err).asGrpcError) - ) - _ <- mapErrNewET(sync.modifyDomain(conf)).valueOr(throw _) - } yield v30.ModifyDomainResponse() - - private def getSequencerAggregatedInfo(domainAlias: String)(implicit - traceContext: TraceContext - ): Future[SequencerAggregatedInfo] = { - for { - alias <- Future( - DomainAlias - .create(domainAlias) - .valueOr(err => throw 
ProtoDeserializationFailure.WrapNoLoggingStr(err).asGrpcError) - ) - connectionConfig <- mapErrNewET( - sync - .domainConnectionConfigByAlias(alias) - .leftMap(_ => SyncServiceUnknownDomain.Error(alias)) - .map(_.config) - ).valueOr(throw _) - result <- - sequencerInfoLoader - .loadSequencerEndpoints(connectionConfig.domain, connectionConfig.sequencerConnections)( - traceContext, - CloseContext(sync), - ) - .valueOr(err => throw DomainRegistryError.fromSequencerInfoLoaderError(err).asGrpcError) - _ <- aliasManager - .processHandshake(connectionConfig.domain, result.domainId) - .leftMap(DomainRegistryHelpers.fromDomainAliasManagerError) - .valueOr(err => throw err.asGrpcError) - } yield result - } - - def reconnectDomains(ignoreFailures: Boolean): Future[Unit] = { - implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - val ret = for { - aliases <- mapErrNewETUS(sync.reconnectDomains(ignoreFailures = ignoreFailures)) - _ <- aliases.parTraverse(waitUntilActive) - } yield () - EitherTUtil.toFuture(ret) - } - - def getDomainId(domainAlias: String)(implicit traceContext: TraceContext): Future[DomainId] = - getSequencerAggregatedInfo(domainAlias).map(_.domainId) - -} diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContract.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContract.scala index 8baf66a9c..29d81b7b2 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContract.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/data/ActiveContract.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.serialization.ProtoConverter.ParsingResult import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.util.{ByteStringUtil, ResourceUtil} import com.digitalasset.canton.version.* -import com.digitalasset.canton.{ProtoDeserializationError, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{ProtoDeserializationError, TransferCounter} import com.google.protobuf.ByteString import java.io.{ByteArrayInputStream, InputStream} @@ -20,7 +20,7 @@ import java.io.{ByteArrayInputStream, InputStream} final case class ActiveContract private ( domainId: DomainId, contract: SerializableContract, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, )(protocolVersion: ProtocolVersion) extends HasProtocolVersionedWrapper[ActiveContract] with HasDomainId @@ -30,11 +30,7 @@ final case class ActiveContract private ( protocolVersion.toProtoPrimitive, domainId.toProtoPrimitive, contract.toByteString(protocolVersion), - transferCounter - .getOrElse( - throw new IllegalStateException("Reassignment counter is required but was empty") - ) - .toProtoPrimitive, + transferCounter.toProtoPrimitive, ) } @@ -69,7 +65,7 @@ private[canton] object ActiveContract extends HasProtocolVersionedCompanion[Acti domainId <- DomainId.fromProtoPrimitive(proto.domainId, "domain_id") contract <- SerializableContract.fromByteString(proto.contract) transferCounter = proto.reassignmentCounter - activeContract <- create(domainId, contract, Some(TransferCounter(transferCounter)))( + activeContract <- create(domainId, contract, TransferCounter(transferCounter))( protocolVersion ).leftMap(_.toProtoDeserializationError) } yield { @@ -85,7 +81,7 @@ private[canton] object ActiveContract extends HasProtocolVersionedCompanion[Acti def create( domainId: DomainId, contract: SerializableContract, - 
transferCounter: TransferCounterO, + transferCounter: TransferCounter, )( protocolVersion: ProtocolVersion ): Either[InvalidActiveContract, ActiveContract] = @@ -94,7 +90,7 @@ private[canton] object ActiveContract extends HasProtocolVersionedCompanion[Acti ActiveContract( domainId: DomainId, contract: SerializableContract, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, )(protocolVersion) ) .leftMap(iae => new InvalidActiveContract(iae.getMessage)) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcDomainConnectivityService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcDomainConnectivityService.scala index 8fd40dc73..eb65a917c 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcDomainConnectivityService.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/grpc/GrpcDomainConnectivityService.scala @@ -3,79 +3,258 @@ package com.digitalasset.canton.participant.admin.grpc +import cats.data.EitherT +import cats.syntax.bifunctor.* +import com.digitalasset.canton.DomainAlias +import com.digitalasset.canton.ProtoDeserializationError.ProtoDeserializationFailure +import com.digitalasset.canton.admin.domain.v30 as domainV30 import com.digitalasset.canton.admin.participant.v30 -import com.digitalasset.canton.admin.participant.v30.* -import com.digitalasset.canton.participant.admin.DomainConnectivityService +import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader +import com.digitalasset.canton.config.ProcessingTimeout +import com.digitalasset.canton.error.BaseCantonError +import com.digitalasset.canton.lifecycle.{CloseContext, FutureUnlessShutdown} +import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil +import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.* +import com.digitalasset.canton.participant.domain.{ + DomainAliasManager, + DomainConnectionConfig, + DomainRegistryError, + DomainRegistryHelpers, +} +import com.digitalasset.canton.participant.sync.CantonSyncService.ConnectDomain +import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceInternalError.DomainIsMissingInternally +import com.digitalasset.canton.participant.sync.SyncServiceError.SyncServiceUnknownDomain +import com.digitalasset.canton.participant.sync.{CantonSyncService, SyncServiceError} +import com.digitalasset.canton.sequencing.SequencerConnectionValidation +import com.digitalasset.canton.serialization.ProtoConverter import com.digitalasset.canton.tracing.{TraceContext, TraceContextGrpc} -import io.grpc.Status +import com.digitalasset.canton.util.EitherTUtil +import com.digitalasset.canton.util.ShowUtil.* import scala.concurrent.{ExecutionContext, Future} -class GrpcDomainConnectivityService(service: DomainConnectivityService)(implicit +class GrpcDomainConnectivityService( + sync: CantonSyncService, + aliasManager: DomainAliasManager, + timeouts: ProcessingTimeout, + sequencerInfoLoader: SequencerInfoLoader, + protected val loggerFactory: NamedLoggerFactory, +)(implicit ec: ExecutionContext -) extends DomainConnectivityServiceGrpc.DomainConnectivityService { +) extends v30.DomainConnectivityServiceGrpc.DomainConnectivityService + with NamedLogging { + + private def waitUntilActiveIfSuccess(success: Boolean, domain: DomainAlias)(implicit + traceContext: TraceContext + ): EitherT[FutureUnlessShutdown, 
BaseCantonError, Unit] = + if (success) waitUntilActive(domain) else EitherT.rightT(()) + + private def waitUntilActive( + domain: DomainAlias + )(implicit traceContext: TraceContext): EitherT[FutureUnlessShutdown, BaseCantonError, Unit] = + for { + domainId <- EitherT.fromOption[FutureUnlessShutdown]( + aliasManager + .domainIdForAlias(domain), + DomainIsMissingInternally(domain, "aliasManager"), + ) + client <- EitherT.fromOption[FutureUnlessShutdown]( + sync.syncCrypto.ips + .forDomain(domainId), + DomainIsMissingInternally(domain, "ips"), + ) + active <- EitherT + .right(client.await(_.isParticipantActive(sync.participantId), timeouts.network.unwrap)) + _ <- + EitherT + .cond[FutureUnlessShutdown]( + active, + (), + DomainRegistryError.ConnectionErrors.ParticipantIsNotActive.Error( + s"While domain $domain promised, participant ${sync.participantId} never became active within `timeouts.network` (${timeouts.network})." + ): BaseCantonError, + ) + } yield () + + private def parseDomainAlias( + domainAliasProto: String + ): EitherT[FutureUnlessShutdown, BaseCantonError, DomainAlias] = + EitherT + .fromEither[FutureUnlessShutdown](DomainAlias.create(domainAliasProto)) + .leftMap(err => ProtoDeserializationFailure.WrapNoLoggingStr(err)) - override def connectDomain(request: ConnectDomainRequest): Future[ConnectDomainResponse] = { + private def parseDomainConnectionConfig( + proto: Option[v30.DomainConnectionConfig], + name: String, + ) = + EitherT + .fromEither[FutureUnlessShutdown]( + ProtoConverter.parseRequired(DomainConnectionConfig.fromProtoV30, name, proto) + ) + .leftMap(err => ProtoDeserializationFailure.WrapNoLogging(err)) + + private def parseSequencerConnectionValidation( + proto: domainV30.SequencerConnectionValidation + ) = + EitherT + .fromEither[FutureUnlessShutdown]( + SequencerConnectionValidation.fromProtoV30(proto) + ) + .leftMap(err => ProtoDeserializationFailure.WrapNoLogging(err)) + + override def connectDomain( + request: v30.ConnectDomainRequest + ): Future[v30.ConnectDomainResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - service.connectDomain(request.domainAlias, request.retry) + val v30.ConnectDomainRequest(domainAlias, keepRetrying) = request + val ret = for { + alias <- parseDomainAlias(domainAlias) + success <- sync.connectDomain(alias, keepRetrying, ConnectDomain.Connect) + _ <- waitUntilActiveIfSuccess(success, alias) + } yield v30.ConnectDomainResponse(connectedSuccessfully = success) + CantonGrpcUtil.mapErrNewEUS(ret) } override def disconnectDomain( - request: DisconnectDomainRequest - ): Future[DisconnectDomainResponse] = { + request: v30.DisconnectDomainRequest + ): Future[v30.DisconnectDomainResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - service.disconnectDomain(request.domainAlias) + val v30.DisconnectDomainRequest(domainAlias) = request + val ret = for { + alias <- parseDomainAlias(domainAlias) + _ <- sync.disconnectDomain(alias).leftWiden[BaseCantonError] + } yield v30.DisconnectDomainResponse() + CantonGrpcUtil.mapErrNewEUS(ret) } override def listConnectedDomains( - request: ListConnectedDomainsRequest - ): Future[ListConnectedDomainsResponse] = - service.listConnectedDomains() + request: v30.ListConnectedDomainsRequest + ): Future[v30.ListConnectedDomainsResponse] = + Future.successful(v30.ListConnectedDomainsResponse(sync.readyDomains.map { + case (alias, (domainId, healthy)) => + new v30.ListConnectedDomainsResponse.Result( + domainAlias = alias.unwrap, + 
domainId = domainId.toProtoPrimitive, + healthy = healthy, + ) + }.toSeq)) override def listConfiguredDomains( - request: ListConfiguredDomainsRequest - ): Future[ListConfiguredDomainsResponse] = - service.listConfiguredDomains() - - private def nonEmptyProcess[T, E](valueO: Option[T], use: T => Future[E]): Future[E] = - valueO match { - case None => - Future.failed( - Status.INVALID_ARGUMENT.withDescription("Empty request received").asRuntimeException() - ) - case Some(value) => use(value) - } + request: v30.ListConfiguredDomainsRequest + ): Future[v30.ListConfiguredDomainsResponse] = { + val connected = sync.readyDomains + val configuredDomains = sync.configuredDomains + Future.successful( + v30.ListConfiguredDomainsResponse( + results = configuredDomains + .filter(_.status.isActive) + .map(_.config) + .map(cnf => + new v30.ListConfiguredDomainsResponse.Result( + config = Some(cnf.toProtoV30), + connected = connected.contains(cnf.domain), + ) + ) + ) + ) + } - override def registerDomain(request: RegisterDomainRequest): Future[RegisterDomainResponse] = { + override def registerDomain( + request: v30.RegisterDomainRequest + ): Future[v30.RegisterDomainResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - nonEmptyProcess( - request.add, - (config: v30.DomainConnectionConfig) => - service.registerDomain(config, handshakeOnly = request.handshakeOnly), - ) + val v30.RegisterDomainRequest(addPO, handshakeOnly, sequencerConnectionValidationPO) = request + val connectDomain = if (handshakeOnly) ConnectDomain.HandshakeOnly else ConnectDomain.Register + val ret: EitherT[FutureUnlessShutdown, BaseCantonError, v30.RegisterDomainResponse] = for { + config <- parseDomainConnectionConfig(addPO, "add") + validation <- parseSequencerConnectionValidation(sequencerConnectionValidationPO) + _ <- EitherTUtil.condUnitET[FutureUnlessShutdown]( + !(config.manualConnect && handshakeOnly), + SyncServiceError.InvalidArgument + .Error("For handshakeOnly to be useful, manualConnect should be set to false"), + ) + _ = logger.info(show"Registering new domain $config") + _ <- sync.addDomain(config, validation).mapK(FutureUnlessShutdown.outcomeK) + _ <- + if (!config.manualConnect) for { + success <- + sync.connectDomain( + config.domain, + keepRetrying = false, + connectDomain = connectDomain, + ) + _ <- waitUntilActiveIfSuccess(success, config.domain) + } yield () + else EitherT.rightT[FutureUnlessShutdown, BaseCantonError](()) + } yield v30.RegisterDomainResponse() + CantonGrpcUtil.mapErrNewEUS(ret) } /** reconfigure a domain connection */ - override def modifyDomain(request: ModifyDomainRequest): Future[ModifyDomainResponse] = { + override def modifyDomain(request: v30.ModifyDomainRequest): Future[v30.ModifyDomainResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - nonEmptyProcess(request.modify, service.modifyDomain) + val v30.ModifyDomainRequest(config, sequencerConnectionValidationPO) = request + val ret = for { + config <- parseDomainConnectionConfig(config, "modify") + validation <- parseSequencerConnectionValidation(sequencerConnectionValidationPO) + _ <- sync + .modifyDomain(config, validation) + .mapK(FutureUnlessShutdown.outcomeK) + .leftWiden[BaseCantonError] + } yield v30.ModifyDomainResponse() + mapErrNewEUS(ret) } /** reconnect to domains */ override def reconnectDomains( - request: ReconnectDomainsRequest - ): Future[ReconnectDomainsResponse] = - service.reconnectDomains(request.ignoreFailures).map(_ => 
ReconnectDomainsResponse()) + request: v30.ReconnectDomainsRequest + ): Future[v30.ReconnectDomainsResponse] = { + implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext + import cats.syntax.parallel.* + val v30.ReconnectDomainsRequest(ignoreFailures) = request + val ret = for { + aliases <- sync.reconnectDomains(ignoreFailures = ignoreFailures) + _ <- aliases.parTraverse(waitUntilActive) + } yield v30.ReconnectDomainsResponse() + CantonGrpcUtil.mapErrNewEUS(ret) + } /** Get the domain id of the given domain alias */ - override def getDomainId(request: GetDomainIdRequest): Future[GetDomainIdResponse] = { + override def getDomainId(request: v30.GetDomainIdRequest): Future[v30.GetDomainIdResponse] = { implicit val traceContext: TraceContext = TraceContextGrpc.fromGrpcContext - service.getDomainId(request.domainAlias).map { domainId => - GetDomainIdResponse(domainId = domainId.toProtoPrimitive) - } + val v30.GetDomainIdRequest(domainAlias) = request + val ret = for { + alias <- parseDomainAlias(domainAlias) + connectionConfig <- + sync + .domainConnectionConfigByAlias(alias) + .leftMap(_ => SyncServiceUnknownDomain.Error(alias)) + .map(_.config) + .mapK(FutureUnlessShutdown.outcomeK) + result <- + sequencerInfoLoader + .loadAndAggregateSequencerEndpoints( + connectionConfig.domain, + connectionConfig.sequencerConnections, + SequencerConnectionValidation.Active, + )( + traceContext, + CloseContext(sync), + ) + .leftMap(err => DomainRegistryError.fromSequencerInfoLoaderError(err)) + .mapK(FutureUnlessShutdown.outcomeK) + .leftWiden[BaseCantonError] + _ <- aliasManager + .processHandshake(connectionConfig.domain, result.domainId) + .leftMap(DomainRegistryHelpers.fromDomainAliasManagerError) + .mapK(FutureUnlessShutdown.outcomeK) + .leftWiden[BaseCantonError] + } yield v30.GetDomainIdResponse(domainId = result.domainId.toProtoPrimitive) + CantonGrpcUtil.mapErrNewEUS(ret) } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspection.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspection.scala index d08a121c0..6a0b91c85 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspection.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspection.scala @@ -15,7 +15,7 @@ import com.digitalasset.canton.topology.client.{DomainTopologyClient, TopologySn import com.digitalasset.canton.topology.{DomainId, ParticipantId, PartyId} import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.MonadUtil -import com.digitalasset.canton.{LfPartyId, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, TransferCounter} import scala.collection.immutable.SortedMap import scala.collection.mutable @@ -65,7 +65,7 @@ private[inspection] object AcsInspection { def getCurrentSnapshot(state: SyncDomainPersistentState)(implicit traceContext: TraceContext, ec: ExecutionContext, - ): Future[Option[AcsSnapshot[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]]]] = + ): Future[Option[AcsSnapshot[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]]]] = for { cursorHeadO <- state.requestJournalStore.preheadClean snapshot <- cursorHeadO @@ -90,7 +90,7 @@ private[inspection] object AcsInspection { traceContext: TraceContext, ec: ExecutionContext, ): EitherT[Future, Error, AcsSnapshot[ - SortedMap[LfContractId, (CantonTimestamp, 
TransferCounterO)] + SortedMap[LfContractId, (CantonTimestamp, TransferCounter)] ]] = for { _ <- @@ -120,11 +120,11 @@ private[inspection] object AcsInspection { traceContext: TraceContext, ec: ExecutionContext, ): EitherT[Future, Error, Option[ - AcsSnapshot[Iterator[Seq[(LfContractId, TransferCounterO)]]] + AcsSnapshot[Iterator[Seq[(LfContractId, TransferCounter)]]] ]] = { type MaybeSnapshot = - Option[AcsSnapshot[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]]] + Option[AcsSnapshot[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]]] val maybeSnapshotET: EitherT[Future, Error, MaybeSnapshot] = timestamp match { case Some(timestamp) => @@ -157,7 +157,7 @@ private[inspection] object AcsInspection { parties: Set[LfPartyId], timestamp: Option[CantonTimestamp], skipCleanTimestampCheck: Boolean = false, - )(f: (SerializableContract, TransferCounterO) => Either[Error, Unit])(implicit + )(f: (SerializableContract, TransferCounter) => Either[Error, Unit])(implicit traceContext: TraceContext, ec: ExecutionContext, ): EitherT[Future, Error, Option[(Set[LfPartyId], CantonTimestamp)]] = { @@ -223,8 +223,8 @@ private[inspection] object AcsInspection { domainId: DomainId, state: SyncDomainPersistentState, parties: Set[LfPartyId], - f: (SerializableContract, TransferCounterO) => Either[Error, Unit], - )(batch: Seq[(LfContractId, TransferCounterO)])(implicit + f: (SerializableContract, TransferCounter) => Either[Error, Unit], + )(batch: Seq[(LfContractId, TransferCounter)])(implicit traceContext: TraceContext, ec: ExecutionContext, ): EitherT[Future, Error, Set[LfPartyId]] = { diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala index e773f3b6a..4a94d1e17 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/inspection/SyncStateInspection.scala @@ -60,7 +60,7 @@ import com.digitalasset.canton.{ LedgerTransactionId, LfPartyId, RequestCounter, - TransferCounterO, + TransferCounter, } import java.io.OutputStream @@ -133,7 +133,7 @@ final class SyncStateInspection( domainAlias: DomainAlias )(implicit traceContext: TraceContext - ): EitherT[Future, AcsError, Map[LfContractId, (CantonTimestamp, TransferCounterO)]] = { + ): EitherT[Future, AcsError, Map[LfContractId, (CantonTimestamp, TransferCounter)]] = { for { state <- EitherT.fromEither[Future]( @@ -143,7 +143,7 @@ final class SyncStateInspection( ) snapshotO <- EitherT.liftF(AcsInspection.getCurrentSnapshot(state).map(_.map(_.snapshot))) - } yield snapshotO.fold(Map.empty[LfContractId, (CantonTimestamp, TransferCounterO)])( + } yield snapshotO.fold(Map.empty[LfContractId, (CantonTimestamp, TransferCounter)])( _.toMap ) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala index 8d9b1fd14..4ee1ae325 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/ChangeAssignation.scala @@ -73,52 +73,51 @@ private final class ChangeAssignation( source: 
Map[LfContractId, ContractState] )(implicit executionContext: ExecutionContext - ): EitherT[Future, String, List[ChangeAssignation.Data[(LfContractId, TransferCounterO)]]] = + ): EitherT[Future, String, List[ChangeAssignation.Data[(LfContractId, TransferCounter)]]] = { + def errorUnlessSkipInactive( + cid: ChangeAssignation.Data[LfContractId], + reason: String, + ): Either[String, None.type] = + Either.cond( + skipInactive, + None, + s"Cannot change contract assignation: contract $cid $reason.", + ) + EitherT.fromEither( contractIds .map(cid => (cid, source.get(cid.payload).map(_.status))) .toList .traverse { case (cid, None) => - Either.cond( - skipInactive, - None, - s"Contract $cid does not exist in source domain and cannot be moved.", - ) + errorUnlessSkipInactive(cid, "does not exist in source domain") case (cid, Some(ActiveContractStore.Active(transferCounter))) => Right(Some(cid.copy(payload = (cid.payload, transferCounter)))) case (cid, Some(ActiveContractStore.Archived)) => - Either.cond( - skipInactive, - None, - s"Contract $cid has been archived and cannot be moved.", - ) + errorUnlessSkipInactive(cid, "has been archived") + case (cid, Some(ActiveContractStore.Purged)) => + errorUnlessSkipInactive(cid, "has been purged") case (cid, Some(ActiveContractStore.TransferredAway(target, _transferCounter))) => - Either - .cond( - skipInactive, - None, - s"Contract $cid has been transferred to $target and cannot be moved.", - ) + errorUnlessSkipInactive(cid, s"has been transferred to $target") } .map(_.flatten) ) + } private def changingContractIds( - sourceContracts: List[ChangeAssignation.Data[(LfContractId, TransferCounterO)]], + sourceContracts: List[ChangeAssignation.Data[(LfContractId, TransferCounter)]], targetStatus: Map[LfContractId, ContractState], )(implicit executionContext: ExecutionContext, traceContext: TraceContext, - ): EitherT[Future, String, List[ChangeAssignation.Data[(LfContractId, TransferCounterO)]]] = { + ): EitherT[Future, String, List[ChangeAssignation.Data[(LfContractId, TransferCounter)]]] = { val filteredE = sourceContracts .traverse { case data @ ChangeAssignation.Data((cid, transferCounter), _, _) => val targetStatusOfContract = targetStatus.get(cid).map(_.status) targetStatusOfContract match { case None | Some(ActiveContractStore.TransferredAway(_, _)) => - transferCounter - .traverse(_.increment) + transferCounter.increment .map(incrementedTc => data.copy(payload = (cid, incrementedTc))) case Some(targetState) => Left( @@ -169,13 +168,13 @@ private final class ChangeAssignation( private def readContractsFromSource( contractIdsWithTransferCounters: List[ - ChangeAssignation.Data[(LfContractId, TransferCounterO)] + ChangeAssignation.Data[(LfContractId, TransferCounter)] ] )(implicit executionContext: ExecutionContext, traceContext: TraceContext, ): EitherT[Future, String, List[ - (SerializableContract, ChangeAssignation.Data[(LfContractId, TransferCounterO)]) + (SerializableContract, ChangeAssignation.Data[(LfContractId, TransferCounter)]) ]] = repairSource.domain.persistentState.contractStore .lookupManyUncached(contractIdsWithTransferCounters.map(_.payload._1)) @@ -186,7 +185,7 @@ private final class ChangeAssignation( private def readContracts( contractIdsWithTransferCounters: List[ - ChangeAssignation.Data[(LfContractId, TransferCounterO)] + ChangeAssignation.Data[(LfContractId, TransferCounter)] ] )(implicit executionContext: ExecutionContext, traceContext: TraceContext): EitherT[ Future, @@ -200,9 +199,7 @@ private final class ChangeAssignation( data 
@ ChangeAssignation.Data((contractId, transferCounter), _, _), ) => for { - transferCounter <- EitherT.fromEither[Future]( - transferCounter.fold(TransferCounter.Genesis.increment)(Right(_)) - ) + transferCounter <- EitherT.fromEither[Future](Right(transferCounter)) serializedTargetO <- EitherT.right( repairTarget.domain.persistentState.contractStore.lookupContract(contractId).value ) @@ -255,7 +252,7 @@ private final class ChangeAssignation( ( contract.payload.contract.contractId, targetDomainId, - Some(contract.payload.transferCounter), + contract.payload.transferCounter, contract.sourceTimeOfChange, ) } @@ -268,7 +265,7 @@ private final class ChangeAssignation( ( contract.payload.contract.contractId, sourceDomainId, - Some(contract.payload.transferCounter), + contract.payload.transferCounter, contract.targetTimeOfChange, ) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/EnsureValidContractIds.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/EnsureValidContractIds.scala index f759657af..14b8790ae 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/EnsureValidContractIds.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/EnsureValidContractIds.scala @@ -7,6 +7,7 @@ import cats.Eval import cats.data.EitherT import cats.syntax.either.* import cats.syntax.parallel.* +import cats.syntax.traverse.* import com.daml.lf.crypto.Hash import com.digitalasset.canton.crypto.{HashOps, HmacOps, Salt} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} @@ -20,7 +21,6 @@ import com.digitalasset.canton.protocol.{ } import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.{TraceContext, Traced} -import com.digitalasset.canton.util.EitherUtil import com.digitalasset.canton.version.ProtocolVersion import scala.collection.concurrent.TrieMap @@ -79,6 +79,11 @@ object EnsureValidContractIds { } + private final case class DiscriminatorWithContractId( + discriminator: Hash, + contractId: LfContractId, + ) + /** Recompute the contract IDs of all contracts using the provided cryptoOps. * The whole preprocessing will fail if any of the following conditions apply to any contract: * - the contract ID discriminator version is unknown @@ -113,11 +118,13 @@ object EnsureValidContractIds { // of the `Eval` as part of resolving the (recomputed) contract ID for dependencies will cause the // immediate retrieval of the dependency, possibly triggering recomputation, limiting throughput in // the presence of dependencies but preventing deadlocks while being stack-safe (`Eval` employs - // trampolining). + // trampolining). If a contract ID is reached for which there is no instance, the recomputation + // cannot be performed. This is normal, as the dependency might have been archived and pruned. Still, + // we issue a warning out of caution. 
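// The lazy-map pattern described in the comment above, reduced to plain strings so it stands alone
// (a sketch with toy names rather than the production types): every key gets its Eval before any
// entry is forced, dependencies are resolved through the same map, and a missing entry is kept
// unchanged, mirroring the warn-and-keep branch above.

import cats.Eval

object LazyRemappingSketch {
  // Each id lists the ids its payload references.
  val inputs: Map[String, List[String]] = Map("a" -> List("b"), "b" -> Nil)

  lazy val remapping: Map[String, Eval[String]] =
    inputs.map { case (id, deps) =>
      id -> Eval.later {
        // Forcing this entry forces the entries of its dependencies; unknown ids stay as they are.
        val remappedDeps = deps.map(dep => remapping.get(dep).fold(dep)(_.value))
        s"$id(${remappedDeps.mkString(",")})"
      }
    }

  def main(args: Array[String]): Unit =
    println(remapping("a").value) // prints a(b())
}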
private def recomputeContractIdSuffix( activeContract: ActiveContract, contractIdVersion: CantonContractIdVersion, - )(implicit ec: ExecutionContext): EitherT[Future, String, ActiveContract] = { + )(implicit tc: TraceContext, ec: ExecutionContext): EitherT[Future, String, ActiveContract] = { val contract = activeContract.contract for { @@ -126,16 +133,13 @@ object EnsureValidContractIds { depsRemapping <- contract.contractInstance.unversioned.cids.toSeq .parTraverse { contractId => fullRemapping - .getOrElse( - contractId, - Eval.now( - EitherT.leftT[Future, ActiveContract]( - s"Illegal state: missing dependency with contract ID '${contractId.coid}'" - ) - ), - ) - .value - .map(contract => contractId -> contract.contract.contractId) + .get(contractId) + .fold { + logger.warn( + s"Missing dependency with contract ID '${contractId.coid}'. The contract might have been archived. Its contract ID cannot be recomputed." + ) + EitherT.rightT[Future, String](contractId -> contractId) + }(_.value.map(contract => contractId -> contract.contract.contractId)) } .map(_.toMap) newRawContractInstance <- EitherT @@ -192,11 +196,32 @@ object EnsureValidContractIds { }, ) + private def ensureDiscriminatorUniqueness( + contracts: Seq[ActiveContract] + ): Either[String, Unit] = { + val allContractIds = contracts.map(_.contract.contractId) + val allDependencies = contracts.flatMap(_.contract.contractInstance.unversioned.cids) + (allContractIds ++ allDependencies) + .traverse { + case contractId @ LfContractId.V1(discriminator, _) => + Right(DiscriminatorWithContractId(discriminator, contractId)) + case unknown => + Left(s"Unknown LF contract ID version, cannot recompute contract ID ${unknown.coid}") + } + .map(_.groupMapReduce(_.discriminator)(cid => Set(cid.contractId))(_ ++ _)) + .flatMap( + _.collectFirst { case cid @ (_, contractIds) if contractIds.sizeIs > 1 => cid } + .toLeft(()) + .leftMap { case (discriminator, contractIds) => + s"Duplicate discriminator '${discriminator.bytes.toHexString}' is used by ${contractIds.size} contract IDs, including (showing up to 10): ${contractIds.take(10).map(_.coid).mkString(", ")}..." + } + ) + } + private def recomputeBrokenContractIdSuffixes(contracts: Seq[ActiveContract])(implicit ec: ExecutionContext, tc: TraceContext, ): EitherT[Future, String, (Seq[ActiveContract], Map[LfContractId, LfContractId])] = { - // Associate every contract ID with a lazy deferred computation that will recompute the contract ID if necessary // It's lazy so that every single contract ID is associated with a computation, before the first one finishes. 
// The assumptions are that every contract ID references in any payload has an associated `ActiveContract` in @@ -217,24 +242,12 @@ object EnsureValidContractIds { } yield completedRemapping -> contractIdRemapping.toMap } - private def ensureAllDependenciesArePresent( - contracts: Seq[ActiveContract] - ): Either[String, Unit] = { - val allContractIds = contracts.map(_.contract.contractId).toSet - val allDependencies = contracts.flatMap(_.contract.contractInstance.unversioned.cids).toSet - val missingDependencies = allDependencies.diff(allContractIds) - EitherUtil.condUnitE( - missingDependencies.isEmpty, - s"Missing ${missingDependencies.size} dependencies (listing up to 10): ${missingDependencies.take(10).map(_.coid).mkString(", ")}", - ) - } - override def apply(contracts: Seq[ActiveContract])(implicit ec: ExecutionContext, tc: TraceContext, ): EitherT[Future, String, (Seq[ActiveContract], Map[LfContractId, LfContractId])] = for { - _ <- EitherT.fromEither[Future](ensureAllDependenciesArePresent(contracts)) + _ <- EitherT.fromEither[Future](ensureDiscriminatorUniqueness(contracts)) completedRemapping <- recomputeBrokenContractIdSuffixes(contracts) } yield completedRemapping } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala index 9b2f1fc0f..51dfa82d5 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/admin/repair/RepairService.scala @@ -5,7 +5,6 @@ package com.digitalasset.canton.participant.admin.repair import cats.Eval import cats.data.{EitherT, OptionT} -import cats.implicits.catsSyntaxTuple2Semigroupal import cats.syntax.either.* import cats.syntax.parallel.* import cats.syntax.traverse.* @@ -134,18 +133,23 @@ final class RepairService( acsState: Option[ActiveContractStore.Status], )(implicit traceContext: TraceContext): EitherT[Future, String, Option[ContractToAdd]] = { val contractId = repairContract.contract.contractId + + def addContract( + transferringFrom: Option[SourceDomainId] + ): EitherT[Future, String, Option[ContractToAdd]] = Right( + Option( + ContractToAdd( + repairContract.contract, + repairContract.witnesses.map(_.toLf), + repairContract.transferCounter, + transferringFrom, + ) + ) + ).toEitherT[Future] + acsState match { - case None => - Right( - Option( - ContractToAdd( - repairContract.contract, - repairContract.witnesses.map(_.toLf), - repairContract.transferCounter, - None, - ) - ) - ).toEitherT[Future] + case None => addContract(transferringFrom = None) + case Some(ActiveContractStore.Active(_)) => if (ignoreAlreadyAdded) { logger.debug(s"Skipping contract $contractId because it is already active") @@ -177,27 +181,17 @@ final class RepairService( s"Cannot add previously archived contract ${repairContract.contract.contractId} as archived contracts cannot become active." ) ) + case Some(ActiveContractStore.Purged) => addContract(transferringFrom = None) case Some(ActiveContractStore.TransferredAway(targetDomain, transferCounter)) => log( s"Marking contract ${repairContract.contract.contractId} previously transferred-out to $targetDomain as " + s"transferred-in from $targetDomain (even though contract may have been transferred to yet another domain since)." 
).discard - val isTransferCounterIncreasing = (transferCounter, repairContract.transferCounter) - .mapN { case (tc, repairTc) => repairTc > tc } - .getOrElse(true) + val isTransferCounterIncreasing = repairContract.transferCounter > transferCounter if (isTransferCounterIncreasing) { - Right( - Option( - ContractToAdd( - repairContract.contract, - repairContract.witnesses.map(_.toLf), - repairContract.transferCounter, - Option(SourceDomainId(targetDomain.unwrap)), - ) - ) - ).toEitherT[Future] + addContract(transferringFrom = Option(SourceDomainId(targetDomain.unwrap))) } else { EitherT.leftT( log( @@ -721,6 +715,14 @@ final class RepairService( private def purgeContract(repair: RepairRequest, ignoreAlreadyPurged: Boolean)( cid: LfContractId )(implicit traceContext: TraceContext): EitherT[Future, String, Option[SerializableContract]] = { + def ignoreOrError(reason: String) = EitherT.cond[Future]( + ignoreAlreadyPurged, + None, + log( + s"Contract $cid cannot be purged: $reason. Set ignoreAlreadyPurged = true to skip non-existing contracts." + ), + ) + val timeOfChange = repair.tryExactlyOneTimeOfChange for { acsStatus <- readContractAcsState(repair.domain.persistentState, cid) @@ -732,47 +734,34 @@ final class RepairService( // Not checking that the participant hosts a stakeholder as we might be cleaning up contracts // on behalf of stakeholders no longer around. contractToArchiveInEvent <- acsStatus match { - case None => - EitherT.cond[Future]( - ignoreAlreadyPurged, - None, - log( - s"Contract $cid does not exist in domain ${repair.domain.alias} and cannot be purged. Set ignoreAlreadyPurged = true to skip non-existing contracts." - ), - ) + case None => ignoreOrError("unknown contract") case Some(ActiveContractStore.Active(_)) => for { - contract <- EitherT + _contract <- EitherT .fromOption[Future]( contractO, log(show"Active contract $cid not found in contract store"), ) - _ <- persistArchival(repair, timeOfChange)(cid) + _ <- persistPurge(repair, timeOfChange)(cid) } yield { logger.info( s"purged contract $cid at repair request ${repair.tryExactlyOneRequestCounter} at ${repair.timestamp}" ) contractO } - case Some(ActiveContractStore.Archived) => - EitherT.cond[Future]( - ignoreAlreadyPurged, - None, - log( - s"Contract $cid is already archived in domain ${repair.domain.alias} and cannot be purged. Set ignoreAlreadyPurged = true to skip archived contracts." - ), - ) + case Some(ActiveContractStore.Archived) => ignoreOrError("archived contract") + case Some(ActiveContractStore.Purged) => ignoreOrError("purged contract") case Some(ActiveContractStore.TransferredAway(targetDomain, transferCounter)) => log( s"Purging contract $cid previously marked as transferred away to $targetDomain. " + s"Marking contract as transferred-in from $targetDomain (even though contract may have since been transferred to yet another domain) and subsequently as archived." 
).discard for { - newTransferCounter <- EitherT.fromEither[Future](transferCounter.traverse(_.increment)) + newTransferCounter <- EitherT.fromEither[Future](transferCounter.increment) sourceDomain = SourceDomainId(targetDomain.unwrap) _ <- persistTransferIn(repair, sourceDomain, cid, newTransferCounter, timeOfChange) - _ <- persistArchival(repair, timeOfChange)(cid) + _ <- persistPurge(repair, timeOfChange)(cid) } yield contractO } } yield contractToArchiveInEvent @@ -843,16 +832,13 @@ final class RepairService( private def persistCreation( repair: RepairRequest, cid: LfContractId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, timeOfChange: TimeOfChange, )(implicit traceContext: TraceContext ): EitherT[Future, String, Unit] = { repair.domain.persistentState.activeContractStore - .markContractActive( - cid -> transferCounter, - timeOfChange, - ) + .markContractsAdded(Seq(cid -> transferCounter), timeOfChange) .toEitherTWithNonaborts .leftMap(e => log(s"Failed to create contract $cid in ActiveContractStore: $e")) } @@ -861,7 +847,7 @@ final class RepairService( repair: RepairRequest, sourceDomain: SourceDomainId, cid: LfContractId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, timeOfChange: TimeOfChange, )(implicit traceContext: TraceContext @@ -877,12 +863,12 @@ final class RepairService( The latter is due to the fact that we do not know the correct transfer counters at the time we persist the deactivation in the ACS. */ - private def persistArchival( + private def persistPurge( repair: RepairRequest, timeOfChange: TimeOfChange, )(cid: LfContractId)(implicit traceContext: TraceContext): EitherT[Future, String, Unit] = repair.domain.persistentState.activeContractStore - .archiveContract(cid, timeOfChange) + .purgeContract(cid, timeOfChange) .toEitherT // not turning warnings to errors on behalf of archived contracts, in contract to created contracts .leftMap(e => log(s"Failed to mark contract $cid as archived: $e")) @@ -1386,7 +1372,7 @@ object RepairService { private final case class ContractToAdd( contract: SerializableContract, witnesses: Set[LfPartyId], - transferCounter: TransferCounterO, + transferCounter: TransferCounter, transferringFrom: Option[SourceDomainId], ) { def driverMetadata(protocolVersion: ProtocolVersion): Bytes = diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala index b157093f4..bb89128d7 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/config/LocalParticipantConfig.scala @@ -8,7 +8,7 @@ import com.daml.jwt.JwtTimestampLeeway import com.digitalasset.canton.config.RequireTypes.* import com.digitalasset.canton.config.* import com.digitalasset.canton.http.HttpApiConfig -import com.digitalasset.canton.ledger.api.tls.{SecretsUrl, TlsConfiguration, TlsVersion} +import com.digitalasset.canton.ledger.api.tls.{TlsConfiguration, TlsVersion} import com.digitalasset.canton.networking.grpc.CantonServerBuilder import com.digitalasset.canton.participant.admin.AdminWorkflowConfig import com.digitalasset.canton.participant.config.LedgerApiServerConfig.DefaultRateLimit @@ -253,7 +253,6 @@ object LedgerApiServerConfig { Some(keyCertChainFile), Some(keyFile), trustCertCollectionFile, - secretsUrl, 
authRequirement, enableCertRevocationChecking, optTlsVersion, @@ -264,7 +263,6 @@ object LedgerApiServerConfig { certChainFile = ExistingFile.tryCreate(keyCertChainFile), privateKeyFile = ExistingFile.tryCreate(keyFile), trustCollectionFile = trustCertCollectionFile.map(x => ExistingFile.tryCreate(x)), - secretsUrl = secretsUrl.map(_.toString), clientAuth = fromClientAuth(authRequirement), minimumServerProtocolVersion = optTlsVersion.map(_.version), enableCertRevocationChecking = enableCertRevocationChecking, @@ -287,7 +285,6 @@ object LedgerApiServerConfig { certChainFile = Some(tlsCantonConfig.certChainFile.unwrap), privateKeyFile = Some(tlsCantonConfig.privateKeyFile.unwrap), trustCollectionFile = tlsCantonConfig.trustCollectionFile.map(_.unwrap), - secretsUrl = tlsCantonConfig.secretsUrl.map(SecretsUrl.fromString), clientAuth = tlsCantonConfig.clientAuth match { case ServerAuthRequirementConfig.Require(_cert) => ClientAuth.REQUIRE case ServerAuthRequirementConfig.Optional => ClientAuth.OPTIONAL diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistry.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistry.scala index a37b730e9..2845655f4 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistry.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/DomainRegistry.scala @@ -59,6 +59,8 @@ object DomainRegistryError extends DomainRegistryErrorGroup { .Error(cause) case SequencerInfoLoaderError.FailedToConnectToSequencers(cause) => DomainRegistryError.ConnectionErrors.FailedToConnectToSequencers.Error(cause) + case SequencerInfoLoaderError.InconsistentConnectivity(cause) => + DomainRegistryError.ConnectionErrors.FailedToConnectToSequencers.Error(cause) } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala index bbeaba693..3830eff1a 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/domain/grpc/GrpcDomainRegistry.scala @@ -29,12 +29,12 @@ import com.digitalasset.canton.participant.topology.{ TopologyComponentFactory, } import com.digitalasset.canton.protocol.StaticDomainParameters -import com.digitalasset.canton.sequencing.SequencerConnections import com.digitalasset.canton.sequencing.client.{ RecordingConfig, ReplayConfig, RichSequencerClient, } +import com.digitalasset.canton.sequencing.{SequencerConnectionValidation, SequencerConnections} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.client.DomainTopologyClientWithInit @@ -123,7 +123,11 @@ class GrpcDomainRegistry( val runE = for { info <- sequencerInfoLoader - .loadSequencerEndpoints(config.domain, sequencerConnections)( + .loadAndAggregateSequencerEndpoints( + config.domain, + sequencerConnections, + SequencerConnectionValidation.Active, // only validate active sequencers (not all endpoints) + )( traceContext, CloseContext(this), ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/event/AcsChangeListener.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/event/AcsChangeListener.scala 
index 999be3580..e1a10a5e1 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/event/AcsChangeListener.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/event/AcsChangeListener.scala @@ -8,8 +8,9 @@ import com.digitalasset.canton.logging.{HasLoggerName, NamedLoggingContext} import com.digitalasset.canton.participant.protocol.conflictdetection.CommitSet import com.digitalasset.canton.protocol.{ContractMetadata, LfContractId, WithContractHash} import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.ErrorUtil import com.digitalasset.canton.util.ShowUtil.* -import com.digitalasset.canton.{LfPartyId, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, TransferCounter} import com.google.common.annotations.VisibleForTesting /** Components that need to keep a running snapshot of ACS. @@ -37,7 +38,7 @@ final case class AcsChange( final case class ContractMetadataAndTransferCounter( contractMetadata: ContractMetadata, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) extends PrettyPrinting { override def pretty: Pretty[ContractMetadataAndTransferCounter] = prettyOfClass( param("contract metadata", _.contractMetadata), @@ -47,7 +48,7 @@ final case class ContractMetadataAndTransferCounter( final case class ContractStakeholdersAndTransferCounter( stakeholders: Set[LfPartyId], - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) extends PrettyPrinting { override def pretty: Pretty[ContractStakeholdersAndTransferCounter] = prettyOfClass( param("stakeholders", _.stakeholders), @@ -78,18 +79,33 @@ object AcsChange extends HasLoggerName { /** Returns an AcsChange based on a given CommitSet. * * @param commitSet The commit set from which to build the AcsChange. - * @param transferCounterOfArchivalIncomplete A map containing transfer counters for every contract in archived contracts - * in the commitSet, i.e., `commitSet.archivals`. If there are archived contracts - * that do not exist in the map, they are assumed to have transfer counter None. + * @param transferCounterOfNonTransientArchivals A map containing transfer counters for every non-transient + * archived contract in the commitSet, i.e., `commitSet.archivals`. + * @param transferCounterOfTransientArchivals A map containing transfer counters for every transient + * archived contract in the commitSet, i.e., `commitSet.archivals`.
+ * @throws java.lang.IllegalStateException if the contract ids in `transferCounterOfTransientArchivals` are not a subset of `commitSet.archivals`; + * if the contract ids in `transferCounterOfNonTransientArchivals` are not a subset of `commitSet.archivals`; + * if the union of contract ids in `transferCounterOfTransientArchivals` and + * `transferCounterOfNonTransientArchivals` does not equal the contract ids in `commitSet.archivals`; */ - def fromCommitSet( + def tryFromCommitSet( commitSet: CommitSet, - transferCounterOfArchivalIncomplete: Map[LfContractId, TransferCounterO], + transferCounterOfNonTransientArchivals: Map[LfContractId, TransferCounter], + transferCounterOfTransientArchivals: Map[LfContractId, TransferCounter], )(implicit loggingContext: NamedLoggingContext): AcsChange = { - val transferCounterOfArchival = commitSet.archivals.keySet - .map(k => (k, transferCounterOfArchivalIncomplete.getOrElse(k, None))) - .toMap + if ( + transferCounterOfTransientArchivals.keySet.union( + transferCounterOfNonTransientArchivals.keySet + ) != commitSet.archivals.keySet + ) { + ErrorUtil.internalError( + new IllegalStateException( + s"the union of contracts ids in $transferCounterOfTransientArchivals and " + + s"$transferCounterOfNonTransientArchivals does not equal the contract ids in ${commitSet.archivals}" + ) + ) + } /* Temporary maps built to easily remove the transient contracts from activate and deactivate the common contracts. The keys are made of the contract id and transfer counter. A transfer-out with transfer counter c cancels out a transfer-in / create with transfer counter c-1. @@ -110,24 +126,6 @@ object AcsChange extends HasLoggerName { ) } - val tmpArchivals = commitSet.archivals.map { case (contractId, data) => - ( - ( - contractId, - // If the transfer counter for an archival is None, either the protocol version does not support - // transfer counters, or the contract might be transient and created / transferred-in in - // the same commit set. Thus we search in the commit set the latest transfer counter of the same contract.
- // transferCounterOfArchival.get(contractId).getOrElse(transferCounterTransient(contractId)) - transferCountersforArchivedCidInclTransient( - contractId, - commitSet, - transferCounterOfArchival, - ), - ), - WithContractHash(data.unwrap.stakeholders, data.contractHash), - ) - } - /* Subtracting the transfer counter of transfer-outs to correctly match deactivated contracts as explained above */ @@ -135,21 +133,60 @@ object AcsChange extends HasLoggerName { ( ( contractId, - data.unwrap.transferCounter.map(_ - 1), + data.unwrap.transferCounter - 1, ), data.map(_.stakeholders), ) } + val tmpArchivalsClean = commitSet.archivals.collect { + case (contractId, data) if transferCounterOfNonTransientArchivals.contains(contractId) => + ( + ( + contractId, + transferCounterOfNonTransientArchivals.getOrElse( + contractId, + // This should not happen (see assertion above) + ErrorUtil.internalError( + new IllegalStateException(s"Unable to find transfer counter for $contractId") + ), + ), + ), + WithContractHash(data.unwrap.stakeholders, data.contractHash), + ) + } + + val tmpArchivals = commitSet.archivals.collect { + case (contractId, data) if transferCounterOfTransientArchivals.contains(contractId) => + ( + ( + contractId, + transferCounterOfTransientArchivals.getOrElse( + contractId, + ErrorUtil.internalError( + new IllegalStateException( + s"${transferCounterOfTransientArchivals.keySet} is not a subset of ${commitSet.archivals}" + ) + ), + ), + ), + WithContractHash(data.unwrap.stakeholders, data.contractHash), + ) + } + val transient = tmpActivations.keySet.intersect((tmpArchivals ++ tmpTransferOuts).keySet) val tmpActivationsClean = tmpActivations -- transient - val tmpArchivalsClean = tmpArchivals -- transient val tmpTransferOutsClean = tmpTransferOuts -- transient val activations = tmpActivationsClean.map { case ((contractId, transferCounter), metadata) => ( contractId, - metadata.map(data => ContractMetadataAndTransferCounter(data, transferCounter)), + metadata.map(data => + ContractMetadataAndTransferCounter( + data, + transferCounter, + ) + ), ) } val archivalDeactivations = tmpArchivalsClean.map { @@ -168,8 +205,9 @@ object AcsChange extends HasLoggerName { } loggingContext.debug( show"Called fromCommitSet with inputs commitSet creations=${commitSet.creations};" + - show"transferIns=${commitSet.transferIns}; archivals=${commitSet.archivals}; transferOuts=${commitSet.transferOuts} and" + - show"archival transfer counters from DB $transferCounterOfArchivalIncomplete" + + show"transferIns=${commitSet.transferIns}; archivals=${commitSet.archivals}; transferOuts=${commitSet.transferOuts};" + + show"archival transfer counters from DB $transferCounterOfNonTransientArchivals and" + + show"archival transfer counters from transient $transferCounterOfTransientArchivals" + show"Completed fromCommitSet with results transient=$transient;" + show"activations=$activations; archivalDeactivations=$archivalDeactivations; transferOutDeactivations=$transferOutDeactivations" ) @@ -180,26 +218,24 @@ object AcsChange extends HasLoggerName { } @VisibleForTesting - def transferCountersforArchivedCidInclTransient( - contractId: LfContractId, - commitSet: CommitSet, - transferCounterOfArchival: Map[LfContractId, TransferCounterO], - ): TransferCounterO = { - transferCounterOfArchival.get(contractId) match { - case Some(tc) if tc.isDefined => tc - case _ => - // We first search in transfer-ins, because they would have the most recent transfer counter. 
- commitSet.transferIns.get(contractId) match { - case Some(tcAndContractHash) if tcAndContractHash.unwrap.transferCounter.isDefined => - tcAndContractHash.unwrap.transferCounter - case _ => - // Then we search in creations - commitSet.creations.get(contractId) match { - case Some(tcAndCHash) if tcAndCHash.unwrap.transferCounter.isDefined => - tcAndCHash.unwrap.transferCounter - case _ => None - } - } + def transferCountersForArchivedTransient( + commitSet: CommitSet + ): Map[LfContractId, TransferCounter] = { + + // We first search in transfer-ins, because they would have the most recent transfer counter. + val transientCidsTransferredIn = commitSet.transferIns.collect { + case (contractId, tcAndContractHash) if commitSet.archivals.keySet.contains(contractId) => + (contractId, tcAndContractHash.unwrap.transferCounter) } + + // Then we search in creations + val transientCidsCreated = commitSet.creations.collect { + case (contractId, tcAndContractHash) + if commitSet.archivals.keySet.contains(contractId) && !transientCidsTransferredIn.keySet + .contains(contractId) => + (contractId, tcAndContractHash.unwrap.transferCounter) + } + + transientCidsTransferredIn ++ transientCidsCreated } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala index 4aadddc8f..ad6ad86d3 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/event/RecordOrderPublisher.scala @@ -23,6 +23,7 @@ import com.digitalasset.canton.lifecycle.{ } import com.digitalasset.canton.logging.pretty.Pretty import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} +import com.digitalasset.canton.participant.event.AcsChange.transferCountersForArchivedTransient import com.digitalasset.canton.participant.event.RecordOrderPublisher.PendingPublish import com.digitalasset.canton.participant.protocol.conflictdetection.CommitSet import com.digitalasset.canton.participant.protocol.submission.{ @@ -398,15 +399,25 @@ class RecordOrderPublisher( show"transfer-ins ${commitSet.transferIns}" + show"archivals ${commitSet.archivals} transfer-outs ${commitSet.transferOuts}" ) + + val transientArchivals = transferCountersForArchivedTransient(commitSet) + val acsChangePublish = for { // Retrieves the transfer counters of the archived contracts from the latest state in the active contract store archivalsWithTransferCountersOnly <- activeContractSnapshot - .bulkContractsTransferCounterSnapshot(commitSet.archivals.keySet, requestCounter) + .bulkContractsTransferCounterSnapshot( + commitSet.archivals.keySet -- transientArchivals.keySet, + requestCounter, + ) } yield { // Computes the ACS change by decorating the archive events in the commit set with their transfer counters - val acsChange = AcsChange.fromCommitSet(commitSet, archivalsWithTransferCountersOnly) + val acsChange = AcsChange.tryFromCommitSet( + commitSet, + archivalsWithTransferCountersOnly, + transientArchivals, + ) logger.debug( s"Computed ACS change activations ${acsChange.activations} deactivations ${acsChange.deactivations}" ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala index 435a0ad62..06fab6662 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/CantonLedgerApiServerWrapper.scala @@ -15,7 +15,6 @@ import com.digitalasset.canton.concurrent.{ import com.digitalasset.canton.config.{NonNegativeFiniteDuration, ProcessingTimeout, StorageConfig} import com.digitalasset.canton.http.JsonApiConfig import com.digitalasset.canton.http.metrics.HttpApiMetrics -import com.digitalasset.canton.ledger.configuration.LedgerId import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle} import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} @@ -45,7 +44,6 @@ object CantonLedgerApiServerWrapper extends NoTracing { * @param jsonApiConfig JSON API configuration * @param indexerConfig indexer configuration * @param indexerLockIds Optional lock IDs to be used for indexer HA - * @param ledgerId unique ledger id used by the ledger API server * @param participantId unique participant id used e.g. for a unique ledger API server index db name * @param engine daml engine shared with Canton for performance reasons * @param syncService canton sync service implementing both read and write services @@ -63,7 +61,6 @@ object CantonLedgerApiServerWrapper extends NoTracing { jsonApiConfig: Option[JsonApiConfig], indexerConfig: IndexerConfig, indexerHaConfig: HaConfig, - ledgerId: LedgerId, participantId: LedgerParticipantId, engine: Engine, syncService: CantonSyncService, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala index 154ac7133..3ddfec173 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/StartableStoppableLedgerApiServer.scala @@ -16,7 +16,6 @@ import com.digitalasset.canton.concurrent.{ import com.digitalasset.canton.config.{MemoryStorageConfig, ProcessingTimeout} import com.digitalasset.canton.http.HttpApiServer import com.digitalasset.canton.ledger.api.auth.CachedJwtVerifierLoader -import com.digitalasset.canton.ledger.api.domain import com.digitalasset.canton.ledger.api.domain.{Filters, TransactionFilter} import com.digitalasset.canton.ledger.api.health.HealthChecks import com.digitalasset.canton.ledger.api.util.TimeProvider @@ -58,7 +57,7 @@ import com.digitalasset.canton.util.{FutureUtil, SimpleExecutionQueue} import com.digitalasset.canton.{DiscardOps, LfPartyId} import io.grpc.ServerInterceptor import io.opentelemetry.api.trace.Tracer -import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTracing +import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTelemetry import org.apache.pekko.NotUsed import org.apache.pekko.actor.ActorSystem import org.apache.pekko.stream.scaladsl.Source @@ -263,7 +262,6 @@ class StartableStoppableLedgerApiServer( } indexService <- new IndexServiceOwner( dbSupport = dbSupport, - ledgerId = domain.LedgerId(config.ledgerId), config = indexServiceConfig, 
participantId = config.participantId, metrics = config.metrics, @@ -316,7 +314,6 @@ class StartableStoppableLedgerApiServer( loggerFactory, ), partyRecordStore = partyRecordStore, - ledgerId = config.ledgerId, participantId = config.participantId, apiStreamShutdownTimeout = config.serverConfig.apiStreamShutdownTimeout, command = config.serverConfig.commandService, @@ -406,7 +403,7 @@ class StartableStoppableLedgerApiServer( config.loggerFactory, config.cantonParameterConfig.loggingConfig.api, ), - GrpcTracing + GrpcTelemetry .builder(config.tracerProvider.openTelemetry) .build() .newServerInterceptor(), diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/LedgerConnection.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/LedgerConnection.scala index e236a0581..0fa1861bb 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/LedgerConnection.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/ledger/api/client/LedgerConnection.scala @@ -24,7 +24,7 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.networking.grpc.ClientChannelBuilder import com.digitalasset.canton.topology.PartyId import com.digitalasset.canton.tracing.TracerProvider -import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTracing +import io.opentelemetry.instrumentation.grpc.v1_6.GrpcTelemetry import scala.concurrent.ExecutionContextExecutor @@ -57,7 +57,7 @@ object LedgerConnection { .builderFor(config.address, config.port.unwrap) .executor(ec) .intercept( - GrpcTracing.builder(tracerProvider.openTelemetry).build().newClientInterceptor() + GrpcTelemetry.builder(tracerProvider.openTelemetry).build().newClientInterceptor() ) LedgerClient.withoutToken(builder.build(), clientConfig, loggerFactory) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/ParticipantMetrics.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/ParticipantMetrics.scala index 309995c45..a56f921eb 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/ParticipantMetrics.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/ParticipantMetrics.scala @@ -176,7 +176,7 @@ class SyncDomainMetrics( ) val taskQueueForDoc: Gauge[Int] = NoOpGauge(prefix :+ "task-queue", 0) def taskQueue(size: () => Int): CloseableGauge = - factory.gauge(prefix :+ "task-queue", 0)(MetricsContext.Empty) + factory.gaugeWithSupplier(prefix :+ "task-queue", size) } // TODO(i14580): add testing @@ -190,7 +190,7 @@ class SyncDomainMetrics( qualification = Traffic, ) val extraTrafficAvailable: Gauge[Long] = - factory.gauge(prefix :+ "extra-traffic-credit-available", 0L)(MetricsContext.Empty) + factory.gauge(prefix :+ "extra-traffic-credit-available", 0L) @MetricDoc.Tag( summary = "Records a new top up on the participant", @@ -198,7 +198,7 @@ class SyncDomainMetrics( qualification = Traffic, ) val topologyTransaction: Gauge[Long] = - factory.gauge(prefix :+ "traffic-state-topology-transaction", 0L)(MetricsContext.Empty) + factory.gauge(prefix :+ "traffic-state-topology-transaction", 0L) @MetricDoc.Tag( summary = "Event was not delivered because of traffic limit exceeded", diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/PruningMetrics.scala 
b/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/PruningMetrics.scala index f814e33e7..7577c6a79 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/PruningMetrics.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/metrics/PruningMetrics.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.participant.metrics import com.daml.metrics.api.MetricDoc.MetricQualification.Debug -import com.daml.metrics.api.MetricHandle.{Gauge, Timer} +import com.daml.metrics.api.MetricHandle.{Gauge, Meter, Timer} import com.daml.metrics.api.{MetricDoc, MetricName, MetricsContext} import com.digitalasset.canton.metrics.CantonLabeledMetricsFactory @@ -25,6 +25,28 @@ class PruningMetrics( qualification = Debug, ) val compute: Timer = metricsFactory.timer(prefix :+ "compute") + + @MetricDoc.Tag( + summary = "Time spent in microseconds between commitment and sequencing.", + description = """Participant nodes compute bilateral commitments at regular intervals. After a commitment + |has been computed it is sent for sequencing. This measures the time between the end of a + |commitment interval and when the commitment has been sequenced. A high value indicates that + |the participant is lagging behind in processing messages and computing commitments or the + |sequencer is slow in sequencing the commitment messages.""", + qualification = Debug, + ) + val sequencingTime: Gauge[Long] = + metricsFactory.gauge(prefix :+ "sequencing-time", 0L)(MetricsContext.Empty) + + @MetricDoc.Tag( + summary = "Times the catch up mode has been activated.", + description = + """Participant nodes compute bilateral commitments at regular intervals. This metric + |exposes how often catch-up mode has been activated. Catch-up mode is triggered according + |to catch-up config and happens if the participant lags behind on computation.""", + qualification = Debug, + ) + val catchupModeEnabled: Meter = metricsFactory.meter(prefix :+ "catchup-mode-enabled") } object prune { diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/Phase37Synchronizer.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/Phase37Synchronizer.scala index 862c5e569..3af2bd001 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/Phase37Synchronizer.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/Phase37Synchronizer.scala @@ -116,6 +116,7 @@ class Phase37Synchronizer( * This filter can be different for each call of awaitConfirmed, but only the first valid filter * will complete with the pending request data.
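A hypothetical usage sketch for the two commitment metrics added above. The metric names ("sequencing-time", "catchup-mode-enabled") match the patch; the java.time-based lag computation and the main method are illustrative assumptions rather than code from this commit.

import java.time.{Duration, Instant}

object CommitmentMetricsSketch {
  // End of the reconciliation interval vs. the time the commitment was sequenced, in microseconds.
  def sequencingLagMicros(intervalEnd: Instant, sequencedAt: Instant): Long =
    Duration.between(intervalEnd, sequencedAt).toNanos / 1000L

  def main(args: Array[String]): Unit = {
    val intervalEnd = Instant.parse("2024-03-19T12:00:00Z")
    val sequencedAt = Instant.parse("2024-03-19T12:00:00.250Z")
    // A value like this would be written to the "sequencing-time" gauge;
    // entering catch-up mode would additionally mark the "catchup-mode-enabled" meter.
    println(sequencingLagMicros(intervalEnd, sequencedAt)) // prints 250000
  }
}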
*/ + @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) def awaitConfirmed(requestType: RequestType)( requestId: RequestId, filter: PendingRequestDataOrReplayData[requestType.PendingRequestData] => Future[Boolean] = diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RepairProcessor.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RepairProcessor.scala index 5fcdfac61..366860773 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RepairProcessor.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/RepairProcessor.scala @@ -12,7 +12,7 @@ import com.digitalasset.canton.tracing.{TraceContext, W3CTraceContext} import java.util.ConcurrentModificationException import java.util.concurrent.atomic.AtomicReference -/** Deals with repair request as part of messsage processing. +/** Deals with repair request as part of message processing. * As is, it merely skips the request counters. */ class RepairProcessor( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala index 92e6a951a..fcea6d8ae 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/TransactionProcessingSteps.scala @@ -1204,7 +1204,7 @@ class TransactionProcessingSteps( traceContext: TraceContext ): EitherT[Future, TransactionProcessorError, CommitAndStoreContractsAndPublishEvent] = { val txValidationResult = pendingRequestData.transactionValidationResult - val commitSet = txValidationResult.commitSet(pendingRequestData.requestId)(protocolVersion) + val commitSet = txValidationResult.commitSet(pendingRequestData.requestId) computeCommitAndContractsAndEvent( requestTime = pendingRequestData.requestTime, @@ -1326,7 +1326,7 @@ class TransactionProcessingSteps( consumedInputsOfHostedParties = usedAndCreated.contracts.consumedInputsOfHostedStakeholders, transient = usedAndCreated.contracts.transient, createdContracts = createdContracts, - )(protocolVersion) + ) commitAndContractsAndEvent <- computeCommitAndContractsAndEvent( requestTime = pendingRequestData.requestTime, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/CommitSet.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/CommitSet.scala index 4903d60db..8b44439de 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/CommitSet.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/CommitSet.scala @@ -18,8 +18,7 @@ import com.digitalasset.canton.protocol.{ WithContractHash, } import com.digitalasset.canton.util.SetsUtil.requireDisjoint -import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{LfPartyId, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, TransferCounter} /** Describes the effect of a confirmation request on the active contracts, contract keys, and transfers. 
* Transient contracts appear the following two sets: @@ -59,33 +58,33 @@ object CommitSet { final case class CreationCommit( contractMetadata: ContractMetadata, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) extends PrettyPrinting { override def pretty: Pretty[CreationCommit] = prettyOfClass( param("contractMetadata", _.contractMetadata), - paramIfDefined("transferCounter", _.transferCounter), + param("transferCounter", _.transferCounter), ) } final case class TransferOutCommit( targetDomainId: TargetDomainId, stakeholders: Set[LfPartyId], - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) extends PrettyPrinting { override def pretty: Pretty[TransferOutCommit] = prettyOfClass( param("targetDomainId", _.targetDomainId), paramIfNonEmpty("stakeholders", _.stakeholders), - paramIfDefined("transferCounter", _.transferCounter), + param("transferCounter", _.transferCounter), ) } final case class TransferInCommit( transferId: TransferId, contractMetadata: ContractMetadata, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) extends PrettyPrinting { override def pretty: Pretty[TransferInCommit] = prettyOfClass( param("transferId", _.transferId), param("contractMetadata", _.contractMetadata), - paramIfDefined("transferCounter", _.transferCounter), + param("transferCounter", _.transferCounter), ) } final case class ArchivalCommit( @@ -103,7 +102,7 @@ object CommitSet { consumedInputsOfHostedParties: Map[LfContractId, WithContractHash[Set[LfPartyId]]], transient: Map[LfContractId, WithContractHash[Set[LfPartyId]]], createdContracts: Map[LfContractId, SerializableContract], - )(protocolVersion: ProtocolVersion)(implicit loggingContext: ErrorLoggingContext): CommitSet = { + )(implicit loggingContext: ErrorLoggingContext): CommitSet = { if (activenessResult.isSuccessful) { val archivals = (consumedInputsOfHostedParties ++ transient).map { case (cid, hostedStakeholders) => @@ -115,7 +114,7 @@ object CommitSet { ), ) } - val transferCounter = Some(TransferCounter.Genesis) + val transferCounter = TransferCounter.Genesis val creations = createdContracts.fmap(c => WithContractHash.fromContract(c, CommitSet.CreationCommit(c.metadata, transferCounter)) ) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetector.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetector.scala index 6988e9b38..0bcdc8c9b 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetector.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetector.scala @@ -4,7 +4,7 @@ package com.digitalasset.canton.participant.protocol.conflictdetection import cats.Monad -import cats.data.NonEmptyChain +import cats.data.{Chain, NonEmptyChain} import cats.syntax.either.* import cats.syntax.foldable.* import cats.syntax.functor.* @@ -115,7 +115,7 @@ private[participant] class ConflictDetector( private[this] val directExecutionContext: DirectExecutionContext = DirectExecutionContext(noTracingLogger) - private[this] val initialTransferCounter = Some(TransferCounter.Genesis) + private[this] val initialTransferCounter = TransferCounter.Genesis /** Registers a pending activeness set. * This marks all contracts and keys in the `activenessSet` with a pending activeness check. 
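A minimal sketch of what the TransferCounterO-to-TransferCounter change above means for code that builds commit sets: the counter is now mandatory, and creations always start at TransferCounter.Genesis instead of Some(TransferCounter.Genesis). The Long-backed TransferCounter and the simplified CreationCommit below are stand-ins for illustration, not the repository's actual types.

object TransferCounterSketch {
  final case class TransferCounter(v: Long) {
    // Mirrors the Either-returning increment used by the transfer-out processing above.
    def increment: Either[String, TransferCounter] =
      if (v == Long.MaxValue) Left("transfer counter overflow") else Right(TransferCounter(v + 1))
  }
  object TransferCounter { val Genesis: TransferCounter = TransferCounter(0L) }

  final case class CreationCommit(stakeholders: Set[String], transferCounter: TransferCounter)

  // Before this patch the field was Option[TransferCounter]; now the counter is required.
  val created: CreationCommit = CreationCommit(Set("alice"), TransferCounter.Genesis)
}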
@@ -398,24 +398,36 @@ private[participant] class ConflictDetector( lockedContracts.foreach { coid => val isActivation = creations.contains(coid) || transferIns.contains(coid) - val optTargetDomain = transferOuts.get(coid).map(_.unwrap.targetDomainId) - val isDeactivation = optTargetDomain.isDefined || archivals.contains(coid) + val transferOutO = transferOuts.get(coid).map(_.unwrap) + val isDeactivation = transferOutO.isDefined || archivals.contains(coid) if (isDeactivation) { if (isActivation) { logger.trace(withRC(rc, s"Activating and deactivating transient contract $coid.")) } else { logger.trace(withRC(rc, s"Deactivating contract $coid.")) } - val transferCounter = transferOuts.get(coid).flatMap(_.unwrap.transferCounter) - val newStatus = optTargetDomain.fold[Status](Archived) { targetDomain => - TransferredAway(targetDomain, transferCounter) + val newStatus = transferOutO.fold[Status](Archived) { transferOut => + TransferredAway( + transferOut.targetDomainId, + transferOut.transferCounter, + ) } contractStates.setStatusPendingWrite(coid, newStatus, toc) pendingContractWrites += coid } else if (isActivation) { val transferCounter = transferIns.get(coid) match { case Some(value) => value.unwrap.transferCounter - case None => creations.get(coid).flatMap(_.unwrap.transferCounter) + case None => + creations + .get(coid) + .map(_.unwrap.transferCounter) + .getOrElse( + ErrorUtil.internalError( + new IllegalStateException( + s"We did not find active contract $coid in $creations" + ) + ) + ) } logger.trace(withRC(rc, s"Activating contract $coid.")) @@ -461,7 +473,7 @@ private[participant] class ConflictDetector( */ logger.trace(withRC(rc, s"About to write commit set to the conflict detection stores")) val archivalWrites = acs.archiveContracts(archivals.keySet.to(LazyList), toc) - val creationWrites = acs.markContractsActive( + val creationWrites = acs.markContractsCreated( creations.keySet.map(cid => cid -> initialTransferCounter).to(LazyList), toc, ) @@ -526,7 +538,9 @@ private[participant] class ConflictDetector( ) ) runSequentially(s"evict states for request $rc") { - val result = results.sequence_.toEither + val result = results.sequence_.toEither.leftMap { chainNE => + NonEmptyChain.fromChainUnsafe(Chain.fromSeq(chainNE.toList.distinct)) + } logger.debug(withRC(rc, "Evicting states")) // Schedule evictions only if no shutdown is happening. (ecForCd is shut down before ecForAcs.) 
pendingContractWrites.foreach(contractStates.signalWriteAndTryEvict(rc, _)) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStatus.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStatus.scala index e72fe9f24..f9f9f1301 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStatus.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/conflictdetection/LockableStatus.scala @@ -37,12 +37,12 @@ private[conflictdetection] object LockableStatus { override def isFree(status: Status): Boolean = status match { case TransferredAway(_, _) => true - case Active(_) | Archived => false + case Active(_) | Archived | Purged => false } override def isActive(status: Status): Boolean = status match { case Active(_) => true - case Archived | TransferredAway(_, _) => false + case Archived | Purged | TransferredAway(_, _) => false } override def shouldEvict(status: Status): Boolean = true diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferData.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferData.scala index 498ada893..dce397a27 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferData.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferData.scala @@ -17,7 +17,7 @@ import com.digitalasset.canton.protocol.{ import com.digitalasset.canton.sequencing.protocol.MediatorsOfDomain import com.digitalasset.canton.util.OptionUtil import com.digitalasset.canton.version.Transfer.SourceProtocolVersion -import com.digitalasset.canton.{RequestCounter, TransferCounterO} +import com.digitalasset.canton.{RequestCounter, TransferCounter} /** Stores the data for a transfer that needs to be passed from the source domain to the target domain. 
*/ final case class TransferData( @@ -48,7 +48,7 @@ final case class TransferData( def sourceMediator: MediatorsOfDomain = transferOutRequest.mediator - def transferCounter: TransferCounterO = Some(transferOutRequest.transferCounter) + def transferCounter: TransferCounter = transferOutRequest.transferCounter def addTransferOutResult(result: DeliveredTransferOutResult): Option[TransferData] = mergeTransferOutResult(Some(result)) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala index eb525c942..8da054edd 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingSteps.scala @@ -54,7 +54,6 @@ import com.digitalasset.canton.{ RequestCounter, SequencerCounter, TransferCounter, - TransferCounterO, checked, } @@ -585,7 +584,7 @@ private[transfer] class TransferInProcessingSteps( transferId: TransferId, rootHash: RootHash, isTransferringParticipant: Boolean, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, hostedStakeholders: List[LfPartyId], ): EitherT[Future, TransferProcessorError, LedgerSyncEvent.TransferredIn] = { val targetDomain = domainId @@ -645,11 +644,7 @@ private[transfer] class TransferInProcessingSteps( workflowId = submitterMetadata.workflowId, isTransferringParticipant = isTransferringParticipant, hostedStakeholders = hostedStakeholders, - transferCounter = transferCounter.getOrElse( - // Default value for protocol version earlier than dev - // TODO(#15179) Adapt when releasing BFT - TransferCounter.MinValue - ), + transferCounter = transferCounter, ) } } @@ -672,7 +667,7 @@ object TransferInProcessingSteps { override val requestSequencerCounter: SequencerCounter, rootHash: RootHash, contract: SerializableContract, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, submitterMetadata: TransferSubmitterMetadata, creatingTransactionId: TransactionId, isTransferringParticipant: Boolean, @@ -691,7 +686,7 @@ object TransferInProcessingSteps { submitterMetadata: TransferSubmitterMetadata, stakeholders: Set[LfPartyId], contract: SerializableContract, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, creatingTransactionId: TransactionId, targetDomain: TargetDomainId, targetMediator: MediatorsOfDomain, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidation.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidation.scala index a8384f90f..d4a0d8015 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidation.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidation.scala @@ -21,7 +21,7 @@ import com.digitalasset.canton.topology.* import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.EitherTUtil.condUnitET import com.digitalasset.canton.util.EitherUtil.condUnitE -import com.digitalasset.canton.{LfPartyId, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, TransferCounter} import com.google.common.annotations.VisibleForTesting 
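A short sketch of the exhaustive status matching that the new Purged state (introduced for LockableStatus above and matched again in TransferOutProcessingSteps below) requires. The Status hierarchy here is a simplified stand-in using the variant names from this patch with simplified payloads.

object ContractStatusSketch {
  sealed trait Status
  final case class Active(transferCounter: Long) extends Status
  case object Archived extends Status
  case object Purged extends Status
  final case class TransferredAway(targetDomain: String, transferCounter: Long) extends Status

  // Mirrors LockableStatus.isActive: only Active contracts can be used as transaction inputs.
  def isActive(status: Status): Boolean = status match {
    case Active(_)                                 => true
    case Archived | Purged | TransferredAway(_, _) => false
  }
}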
import scala.concurrent.{ExecutionContext, Future} @@ -287,8 +287,8 @@ object TransferInValidation { final case class InconsistentTransferCounter( transferId: TransferId, - declaredTransferCounter: TransferCounterO, - expectedTransferCounter: TransferCounterO, + declaredTransferCounter: TransferCounter, + expectedTransferCounter: TransferCounter, ) extends TransferInValidationError { override def message: String = s"Cannot transfer-in $transferId: Transfer counter $declaredTransferCounter in transfer-in does not match $expectedTransferCounter from the transfer-out" diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala index ee5edd11e..073b3aed0 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingSteps.scala @@ -33,6 +33,12 @@ import com.digitalasset.canton.participant.protocol.transfer.TransferOutProcesso } import com.digitalasset.canton.participant.protocol.transfer.TransferProcessingSteps.* import com.digitalasset.canton.participant.protocol.{ProcessingSteps, ProtocolProcessor} +import com.digitalasset.canton.participant.store.ActiveContractStore.{ + Active, + Archived, + Purged, + TransferredAway, +} import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.participant.sync.{LedgerSyncEvent, TimestampedEvent} import com.digitalasset.canton.participant.util.DAMLe @@ -133,16 +139,20 @@ class TransferOutProcessingSteps( ephemeralState.tracker .getApproximateStates(Seq(contractId)) .map(_.get(contractId) match { - case Some(state) if state.status.isActive => Right(state.status.transferCounter) case Some(state) => - Left(TransferOutProcessorError.DeactivatedContract(contractId, status = state.status)) + state.status match { + case Active(tc) => Right(tc) + case Archived | Purged | _: TransferredAway => + Left( + TransferOutProcessorError.DeactivatedContract(contractId, status = state.status) + ) + } case None => Left(TransferOutProcessorError.UnknownContract(contractId)) }) ).mapK(FutureUnlessShutdown.outcomeK) newTransferCounter <- EitherT.fromEither[FutureUnlessShutdown]( - transferCounter - .traverse(_.increment) + transferCounter.increment .leftMap(_ => TransferOutProcessorError.TransferCounterOverflow) ) @@ -577,7 +587,7 @@ class TransferOutProcessingSteps( creations = Map.empty, transferOuts = Map( contractId -> WithContractHash( - CommitSet.TransferOutCommit(targetDomain, stakeholders, Some(transferCounter)), + CommitSet.TransferOutCommit(targetDomain, stakeholders, transferCounter), contractHash, ) ), @@ -715,7 +725,7 @@ class TransferOutProcessingSteps( rootHash: RootHash, ): Option[ConfirmationResponse] = { val expectedPriorTransferCounter = Map[LfContractId, Option[ActiveContractStore.Status]]( - contractId -> Some(ActiveContractStore.Active(Some(declaredTransferCounter - 1))) + contractId -> Some(ActiveContractStore.Active(declaredTransferCounter - 1)) ) val successful = diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutRequest.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutRequest.scala index d404b27d7..f13498b57 100644 --- 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutRequest.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutRequest.scala @@ -16,7 +16,7 @@ import com.digitalasset.canton.topology.ParticipantId import com.digitalasset.canton.topology.client.TopologySnapshot import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.version.Transfer.{SourceProtocolVersion, TargetProtocolVersion} -import com.digitalasset.canton.{LfPartyId, TransferCounterO} +import com.digitalasset.canton.{LfPartyId, TransferCounter} import java.util.UUID import scala.concurrent.ExecutionContext @@ -41,7 +41,7 @@ final case class TransferOutRequest( targetDomain: TargetDomainId, targetProtocolVersion: TargetProtocolVersion, targetTimeProof: TimeProof, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) { def toFullTransferOutTree( @@ -97,7 +97,7 @@ object TransferOutRequest { targetProtocolVersion: TargetProtocolVersion, sourceTopology: TopologySnapshot, targetTopology: TopologySnapshot, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, logger: TracedLogger, )(implicit traceContext: TraceContext, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala index 3ac143dc5..416a12249 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/protocol/validation/TransactionValidationResult.scala @@ -10,7 +10,6 @@ import com.digitalasset.canton.participant.protocol.validation.ContractConsisten import com.digitalasset.canton.participant.protocol.validation.InternalConsistencyChecker.ErrorWithInternalConsistencyCheck import com.digitalasset.canton.participant.protocol.validation.TimeValidator.TimeCheckFailure import com.digitalasset.canton.protocol.* -import com.digitalasset.canton.version.ProtocolVersion import com.digitalasset.canton.{LfPartyId, WorkflowId} final case class TransactionValidationResult( @@ -39,12 +38,12 @@ final case class TransactionValidationResult( def commitSet( requestId: RequestId - )(protocolVersion: ProtocolVersion)(implicit loggingContext: ErrorLoggingContext): CommitSet = + )(implicit loggingContext: ErrorLoggingContext): CommitSet = CommitSet.createForTransaction( activenessResult, requestId, consumedInputsOfHostedParties, transient, createdContracts, - )(protocolVersion) + ) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala index fa8b6efa2..320c4156d 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessor.scala @@ -19,7 +19,13 @@ import com.digitalasset.canton.crypto.* import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} import com.digitalasset.canton.error.CantonErrorGroups.ParticipantErrorGroup.AcsCommitmentErrorGroup import com.digitalasset.canton.error.{Alarm, 
AlarmErrorCode, CantonError} -import com.digitalasset.canton.lifecycle.{FlagCloseable, FutureUnlessShutdown, Lifecycle} +import com.digitalasset.canton.health.{AtomicHealthComponent, ComponentHealthState} +import com.digitalasset.canton.lifecycle.{ + FlagCloseable, + FutureUnlessShutdown, + Lifecycle, + OnShutdownRunner, +} import com.digitalasset.canton.logging.* import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.participant.event.{ @@ -29,6 +35,7 @@ import com.digitalasset.canton.participant.event.{ RecordTime, } import com.digitalasset.canton.participant.metrics.PruningMetrics +import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.Errors.DegradationError import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.Errors.MismatchError.AcsCommitmentAlarm import com.digitalasset.canton.participant.store.* import com.digitalasset.canton.protocol.ContractIdSyntax.* @@ -48,7 +55,6 @@ import com.digitalasset.canton.sequencing.client.SendAsyncClientError.RequestRef import com.digitalasset.canton.sequencing.client.{SendType, SequencerClientSend} import com.digitalasset.canton.sequencing.protocol.{Batch, OpenEnvelope, Recipients, SendAsyncError} import com.digitalasset.canton.store.SequencerCounterTrackerStore -import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.processing.EffectiveTime import com.digitalasset.canton.topology.{DomainId, ParticipantId} import com.digitalasset.canton.tracing.{TraceContext, Traced} @@ -58,9 +64,8 @@ import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.* import com.digitalasset.canton.util.retry.Policy import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{DiscardOps, LfPartyId, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{DiscardOps, LfPartyId, TransferCounter} import com.google.common.annotations.VisibleForTesting -import com.google.protobuf.ByteString import java.util.concurrent.atomic.AtomicReference import scala.annotation.tailrec @@ -161,9 +166,6 @@ class AcsCommitmentProcessor( pruningObserver: TraceContext => Unit, metrics: PruningMetrics, protocolVersion: ProtocolVersion, - @VisibleForTesting private[pruning] val acsCommitmentsCatchUpConfig: Option[ - AcsCommitmentsCatchUpConfig - ], override protected val timeouts: ProcessingTimeout, futureSupervisor: FutureSupervisor, activeContractStore: ActiveContractStore, @@ -176,7 +178,11 @@ class AcsCommitmentProcessor( with NamedLogging { import AcsCommitmentProcessor.* - + private[canton] val healthComponent = new AcsCommitmentProcessorHealth( + AcsCommitmentProcessor.healthName, + this, + logger, + ) // As the commitment computation is in the worst case expected to last the same order of magnitude as the // reconciliation interval, wait for at least that long override protected def closingTimeout: FiniteDuration = { @@ -215,6 +221,16 @@ class AcsCommitmentProcessor( @volatile private[this] var endOfLastProcessedPeriodDuringCatchUp: Option[CantonTimestampSecond] = None + /** During a coarse-grained catch-up interval, [[runningCmtSnapshotsForCatchUp]] stores in memory the snapshots for the + * fine grained reconciliation periods. 
In case of a commitment mismatch at the end of a catch-up interval, the + * participant uses these snapshots in the function [[sendCommitmentMessagesInCatchUpInterval]] to compute fine + * grained commitments and send them to the counter-participant, which enables more precise detection of the interval + * when ACS divergence happened. + */ + private val runningCmtSnapshotsForCatchUp = + scala.collection.mutable.Map + .empty[CommitmentPeriod, Map[SortedSet[LfPartyId], AcsCommitment.CommitmentType]] + /** A future checking whether the node should enter catch-up mode by computing the catch-up timestamp. * At most one future runs computing this */ @@ -225,6 +241,7 @@ class AcsCommitmentProcessor( val runningCommitments: Future[RunningCommitments] = initRunningCommitments(store) private val cachedCommitments: CachedCommitments = new CachedCommitments() + private val cachedCommitmentsForRetroactiveSends: CachedCommitments = new CachedCommitments() private val timestampsWithPotentialTopologyChanges = new AtomicReference[List[Traced[EffectiveTime]]](List()) @@ -273,7 +290,7 @@ class AcsCommitmentProcessor( s"Initialized from stored snapshot at ${snapshot.watermark} (might be incomplete)" ) - _ <- lastComputed.fold(Future.unit)(processBuffered) + _ <- lastComputed.fold(Future.unit)(ts => processBuffered(ts, endExclusive = false)) _ = logger.info("Initialized the ACS commitment processor queue") } yield () @@ -291,34 +308,60 @@ class AcsCommitmentProcessor( /** Indicates what timestamp the participant catches up to. */ @volatile private[this] var catchUpToTimestamp = CantonTimestamp.MinValue - private def catchUpInProgress(crtTimestamp: CantonTimestamp): Boolean = - acsCommitmentsCatchUpConfig.isDefined && catchUpToTimestamp >= crtTimestamp + private[pruning] def catchUpConfig( + cantonTimestamp: CantonTimestamp + )(implicit + traceContext: TraceContext + ): Future[Option[AcsCommitmentsCatchUpConfig]] = { + for { + snapshot <- domainCrypto.ipsSnapshot(cantonTimestamp) + config <- snapshot.findDynamicDomainParametersOrDefault( + protocolVersion, + warnOnUsingDefault = false, + ) + } yield { config.acsCommitmentsCatchUpConfig } + } + + private def catchUpInProgress(crtTimestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Boolean] = { + for { + config <- catchUpConfig(crtTimestamp) + } yield config.isDefined && catchUpToTimestamp >= crtTimestamp + + } - private def caughtUpToBoundary(timestamp: CantonTimestamp): Boolean = - acsCommitmentsCatchUpConfig.isDefined && timestamp == catchUpToTimestamp + private def caughtUpToBoundary(timestamp: CantonTimestamp)(implicit + traceContext: TraceContext + ): Future[Boolean] = + for { + config <- catchUpConfig(timestamp) + } yield config.isDefined && catchUpToTimestamp == timestamp - /** Detects whether the participant should trigger or exit catch-up. - * In case a catch-up should start, returns the catch-up timestamp; the participant will catch up to the - * next published tick >= catch-up timestamp. - * Otherwise returns the prior catch-up boundary, which might be in the past when no catch-up is in progress. + /** Detects whether the participant is lagging too far behind (w.r.t. the catchUp config) in commitment computation. + * If lagging behind, the method returns a new catch-up timestamp, otherwise it returns the existing [[catchUpToTimestamp]]. + * It is up to the caller to update the [[catchUpToTimestamp]] accordingly. + * + * Note that, even if the participant is not lagging too far behind, it does not mean it "caught up". 
+ * In particular, if the participant's current timestamp is behind [[catchUpToTimestamp]], then the participant is + * still catching up. Please use the method [[catchUpInProgress]] to determine whether the participant has caught up. */ private def computeCatchUpTimestamp( - completedPeriodTimestamp: CantonTimestamp + completedPeriodTimestamp: CantonTimestamp, + config: Option[AcsCommitmentsCatchUpConfig], )(implicit traceContext: TraceContext): Future[CantonTimestamp] = { - if (acsCommitmentsCatchUpConfig.isDefined) { - for { - catchUpBoundaryTimestamp <- laggingTooFarBehind(completedPeriodTimestamp) - } yield { - if (catchUpBoundaryTimestamp != completedPeriodTimestamp) { - logger.debug( - s"Computed catch up boundary when processing end of period $completedPeriodTimestamp: computed catch-up timestamp is $catchUpBoundaryTimestamp" - ) - catchUpBoundaryTimestamp - } else { - catchUpToTimestamp - } + for { + catchUpBoundaryTimestamp <- laggingTooFarBehind(completedPeriodTimestamp, config) + } yield { + if (config.isDefined && catchUpBoundaryTimestamp != completedPeriodTimestamp) { + logger.debug( + s"Computed catch up boundary when processing end of period $completedPeriodTimestamp: computed catch-up timestamp is $catchUpBoundaryTimestamp" + ) + catchUpBoundaryTimestamp + } else { + catchUpToTimestamp } - } else Future.successful(catchUpToTimestamp) + } } def initializeTicksOnStartup( @@ -406,11 +449,13 @@ class AcsCommitmentProcessor( * processing in [[publish]]. [[checkAndTriggerCatchUpMode]] checks whether the participant received commitments * from a period that's significantly ahead the participant's current period. * - * During catch-up mode, the participant still computes and stores local commitments at reconciliation ticks, just - * as before. However, the participant sends out commitments only at catch-up interval boundaries, and check it - * against the incoming commitments. To enable fine-grained mismatch detection in case of a mismatch during catch-up, - * the participant sends out commitments for each reconciliation interval covered by the catch-up period to those - * counter-participants whose commitments do not match. + * During catch-up mode, the participant computes, stores and sends commitments to counter-participants only at + * catch-up interval boundaries, instead of at each reconciliation tick. In case of a commitment mismatch during + * catch-up at a catch-up interval boundary, the participant sends out commitments for each reconciliation + * interval covered by the catch-up period to those counter-participants (configurable) + * (a) whose catch-up interval boundary commitments do not match + * (b) *** default *** whose catch-up interval boundary commitments do not match or who haven't sent a + * catch-up interval boundary commitment yet */ private def publishTick(toc: RecordTime, acsChange: AcsChange)(implicit traceContext: TraceContext @@ -427,49 +472,64 @@ class AcsCommitmentProcessor( def processCompletedPeriod( snapshot: RunningCommitments )(completedPeriod: CommitmentPeriod, cryptoSnapshot: SyncCryptoApi): Future[Unit] = { - - // We update `catchUpTimestamp` only if the future computing `computedNewCatchUpTimestamp` has returned by - // this point. If `catchUpToTimestamp` is greater or equal to the participant's end of period, then the - // participant enter catch up mode up to `catchUpTimestamp`. - // Important: `catchUpToTimestamp` is not updated concurrently, because `processCompletedPeriod` runs - // sequentially on the `dbQueue`. 
Moreover, the outer `performPublish` queue inserts `processCompletedPeriod` - // sequentially in the order of the timestamps, which is the key to ensuring that it - // grows monotonically and that catch-ups are towards the future. - if (acsCommitmentsCatchUpConfig.isDefined && computingCatchUpTimestamp.isCompleted) { - computingCatchUpTimestamp.value.foreach { v => - v.fold( - exc => logger.error(s"Error when computing the catch up timestamp", exc), - res => catchUpToTimestamp = res, + for { + // We update `catchUpTimestamp` only if the future computing `computedNewCatchUpTimestamp` has returned by + // this point. If `catchUpToTimestamp` is greater or equal to the participant's end of period, then the + // participant enters catch up mode up to `catchUpTimestamp`. + // Important: `catchUpToTimestamp` is not updated concurrently, because `processCompletedPeriod` runs + // sequentially on the `dbQueue`. Moreover, the outer `performPublish` queue inserts `processCompletedPeriod` + // sequentially in the order of the timestamps, which is the key to ensuring that it + // grows monotonically and that catch-ups are towards the future. + config <- catchUpConfig(completedPeriod.toInclusive.forgetRefinement) + + _ = if (config.isDefined && computingCatchUpTimestamp.isCompleted) { + computingCatchUpTimestamp.value.foreach { v => + v.fold( + exc => logger.error(s"Error when computing the catch up timestamp", exc), + res => catchUpToTimestamp = res, + ) + } + computingCatchUpTimestamp = computeCatchUpTimestamp( + completedPeriod.toInclusive.forgetRefinement, + config, ) } - computingCatchUpTimestamp = computeCatchUpTimestamp( + + // Evaluate in the beginning the catch-up conditions for simplicity + catchingUpInProgress <- catchUpInProgress(completedPeriod.toInclusive.forgetRefinement) + hasCaughtUpToBoundaryRes <- caughtUpToBoundary( completedPeriod.toInclusive.forgetRefinement ) - } - // Evaluate in the beginning the catch-up conditions for simplicity - val catchingUpInProgress = catchUpInProgress(completedPeriod.toInclusive.forgetRefinement) - val hasCaughtUpToBoundaryRes = caughtUpToBoundary( - completedPeriod.toInclusive.forgetRefinement - ) + _ = if (catchingUpInProgress && healthComponent.isOk) { + metrics.commitments.catchupModeEnabled.mark() + logger.debug(s"Entered catch-up mode with config ${config.toString}") + if (config.exists(cfg => cfg.catchUpIntervalSkip.value == 1)) + healthComponent.degradationOccurred( + DegradationError.AcsCommitmentDegradationWithIneffectiveConfig.Report() + ) + else + healthComponent.degradationOccurred(DegradationError.AcsCommitmentDegradation.Report()) + } - logger.debug( - show"Processing completed period $completedPeriod. Modes: in catch-up mode = $catchingUpInProgress, " + - show"and if yes, caught up to catch-up boundary $hasCaughtUpToBoundaryRes" - ) + _ = logger.debug( + show"Processing completed period $completedPeriod. Modes: in catch-up mode = $catchingUpInProgress, " + + show"and if yes, caught up to catch-up boundary $hasCaughtUpToBoundaryRes" + ) - // If there is a commitment mismatch at the end of the catch-up period, we need to send fine-grained commitments - // starting at `endOfLastProcessedPeriod` and ending at `endOfLastProcessedPeriodDuringCatchUp` for all - // reconciliation intervals covered by the catch-up period. 
- // However, `endOfLastProcessedPeriod` and `endOfLastProcessedPeriodDuringCatchUp` are updated when marking - // the period as processed during catch-up, and when processing a catch-up, respectively, therefore - // we save their prior values. - val lastSentCatchUpCommitmentTimestamp = endOfLastProcessedPeriod - val lastProcessedCatchUpCommitmentTimestamp = endOfLastProcessedPeriodDuringCatchUp - - val snapshotRes = snapshot.snapshot() - logger.debug(show"Commitment snapshot for completed period $completedPeriod: $snapshotRes") - for { + // If there is a commitment mismatch at the end of the catch-up period, we need to send fine-grained commitments + // starting at `endOfLastProcessedPeriod` and ending at `endOfLastProcessedPeriodDuringCatchUp` for all + // reconciliation intervals covered by the catch-up period. + // However, `endOfLastProcessedPeriod` and `endOfLastProcessedPeriodDuringCatchUp` are updated when marking + // the period as processed during catch-up, and when processing a catch-up, respectively, therefore + // we save their prior values. + lastSentCatchUpCommitmentTimestamp = endOfLastProcessedPeriod + lastProcessedCatchUpCommitmentTimestamp = endOfLastProcessedPeriodDuringCatchUp + + snapshotRes = snapshot.snapshot() + _ = logger.debug( + show"Commitment snapshot for completed period $completedPeriod: $snapshotRes" + ) // Detect possible inconsistencies of the running commitments and the ACS state // Runs only when enableAdditionalConsistencyChecks is true // *** Should not be enabled in production *** @@ -481,38 +541,50 @@ class AcsCommitmentProcessor( completedPeriod.toInclusive.forgetRefinement, ) - msgs <- commitmentMessages(completedPeriod, snapshotRes.active, cryptoSnapshot) - _ = logger.debug( - show"Commitment messages for $completedPeriod: ${msgs.fmap(_.message.commitment)}" - ) - - _ <- storeCommitments(msgs) - _ = logger.debug( - s"Computed and stored ${msgs.size} commitment messages for period $completedPeriod" - ) - _ <- if (!catchingUpInProgress || hasCaughtUpToBoundaryRes) { - store.markOutstanding(completedPeriod, msgs.keySet) + for { + msgs <- commitmentMessages(completedPeriod, snapshotRes.active, cryptoSnapshot) + _ = logger.debug( + show"Commitment messages for $completedPeriod: ${msgs.fmap(_.message.commitment)}" + ) + _ <- storeCommitments(msgs) + _ = logger.debug( + s"Computed and stored ${msgs.size} commitment messages for period $completedPeriod" + ) + _ <- store.markOutstanding(completedPeriod, msgs.keySet) + _ <- persistRunningCommitments(snapshotRes) + } yield { + sendCommitmentMessages(completedPeriod, msgs) + } } else Future.unit - _ <- persistRunningCommitments(snapshotRes) - - // mark the period as processed during catch-up - _ = if (catchingUpInProgress) { - endOfLastProcessedPeriodDuringCatchUp = Some(completedPeriod.toInclusive) - } - - _ = - if (!catchingUpInProgress || hasCaughtUpToBoundaryRes) { - sendCommitmentMessages(completedPeriod, msgs) - } + _ <- + if (catchingUpInProgress) { + for { + _ <- + if (!hasCaughtUpToBoundaryRes) { + for { + // persist running commitments for crash recovery + _ <- persistRunningCommitments(snapshotRes) + _ <- persistCatchUpPeriod(completedPeriod) + } yield { + // store running commitments in memory, in order to compute and send fine-grained commitments + // if a mismatch appears after catch-up + runningCmtSnapshotsForCatchUp += completedPeriod -> snapshotRes.active + } + } else Future.unit + // mark the period as processed during catch-up + _ = endOfLastProcessedPeriodDuringCatchUp = 
Some(completedPeriod.toInclusive) + } yield () + } else Future.unit _ <- if (!catchingUpInProgress) { + healthComponent.resolveUnhealthy() indicateReadyForRemote(completedPeriod.toInclusive) for { - _ <- processBuffered(completedPeriod.toInclusive) + _ <- processBuffered(completedPeriod.toInclusive, endExclusive = false) _ <- indicateLocallyProcessed(completedPeriod) } yield () } else Future.unit @@ -521,19 +593,24 @@ class AcsCommitmentProcessor( _ <- if (hasCaughtUpToBoundaryRes) { for { - _ <- msgs.toList.parTraverse_ { case (_participantId, signedMessage) => - checkMatchAndMarkSafeOrFixDuringCatchUp( - lastSentCatchUpCommitmentTimestamp, - lastProcessedCatchUpCommitmentTimestamp, - signedMessage.message, - cryptoSnapshot, - ) - } + _ <- checkMatchAndMarkSafeOrFixDuringCatchUp( + lastSentCatchUpCommitmentTimestamp, + lastProcessedCatchUpCommitmentTimestamp, + completedPeriod, + cryptoSnapshot, + ) // The ordering here is important; we shouldn't move `readyForRemote` before we mark the periods as outstanding, // as otherwise we can get a race where an incoming commitment doesn't "clear" the outstanding period _ = indicateReadyForRemote(completedPeriod.toInclusive) + // Processes buffered counter-commitments for the catch-up period and compares them with local commitments, + // which are available if there was a mismatch at the catch-up boundary + // Ignore the buffered commitment at the boundary + _ <- processBuffered(completedPeriod.toInclusive, endExclusive = true) // *After the above check* (the order matters), mark all reconciliation intervals as locally processed. _ <- indicateLocallyProcessed(completedPeriod) + // clear the commitment snapshot in memory once we caught up + _ = runningCmtSnapshotsForCatchUp.clear() + _ = cachedCommitmentsForRetroactiveSends.clear() } yield () } else Future.unit @@ -552,26 +629,27 @@ class AcsCommitmentProcessor( periodEndO: Option[CantonTimestampSecond], ): FutureUnlessShutdown[Unit] = { // Check whether this change pushes us to a new commitment period; if so, the previous one is completed - val completedPeriodAndCryptoO = for { - periodEnd <- periodEndO - endOfLast = - if ( - catchUpInProgress( - endOfLastProcessedPeriod.fold(CantonTimestamp.MinValue)(res => res.forgetRefinement) - ) - ) { - endOfLastProcessedPeriodDuringCatchUp - } else { - endOfLastProcessedPeriod + for { + catchingUp <- FutureUnlessShutdown.outcomeF( + catchUpInProgress( + endOfLastProcessedPeriod.fold(CantonTimestamp.MinValue)(res => res.forgetRefinement) + ) + ) + completedPeriodAndCryptoO = for { + periodEnd <- periodEndO + endOfLast = { + if (catchingUp) { + endOfLastProcessedPeriodDuringCatchUp + } else { + endOfLastProcessedPeriod + } } - completedPeriod <- reconciliationIntervals - .commitmentPeriodPreceding(periodEnd, endOfLast) - cryptoSnapshot <- cryptoSnapshotO - } yield { - (completedPeriod, cryptoSnapshot) - } + completedPeriod <- reconciliationIntervals.commitmentPeriodPreceding(periodEnd, endOfLast) + cryptoSnapshot <- cryptoSnapshotO + } yield { + (completedPeriod, cryptoSnapshot) + } - for { // Important invariant: // - let t be the tick of [[com.digitalasset.canton.participant.store.AcsCommitmentStore#lastComputedAndSent]]; // assume that t is not None @@ -758,6 +836,50 @@ class AcsCommitmentProcessor( .map(_ => logger.debug(s"Persisted ACS commitments at ${res.recordTime}")) } + /** Store special empty commitment to remember we were in catch-up mode, + * with the current participant as the counter-participant. 
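
For orientation, the catch-up entry condition spelled out in the comments above reduces to a small predicate: catch-up starts when the pre-computed catch-up timestamp lies at or beyond the end of the completed period, and a `catchUpIntervalSkip` of 1 is reported as an ineffective configuration. The sketch below is illustrative only (plain `Long` microsecond timestamps and a hypothetical `CatchUpConfig` stand-in), not part of the patch.

    // Illustrative sketch, not part of the patch.
    final case class CatchUpConfig(catchUpIntervalSkip: Long, nrIntervalsToTriggerCatchUp: Long)

    // Enter catch-up mode iff the computed catch-up timestamp is at or beyond the period end.
    def entersCatchUp(periodEndMicros: Long, catchUpToMicros: Long): Boolean =
      catchUpToMicros >= periodEndMicros

    // A skip factor of 1 "catches up" one interval at a time, which brings no improvement;
    // the processor reports a degradation warning for this configuration.
    def isIneffective(config: CatchUpConfig): Boolean =
      config.catchUpIntervalSkip == 1
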
+ * Because the special commitment have the current participant as counter-participant, they do not conflict + * with "normal operation" commitments, which have other participants as counter-participants. + */ + private def persistCatchUpPeriod(period: CommitmentPeriod)(implicit + traceContext: TraceContext + ): Future[Unit] = { + val catchUpCmt = AcsCommitment.create( + domainId, + participantId, + participantId, + period, + AcsCommitmentProcessor.emptyCommitment, + protocolVersion, + ) + storeCommitments( + Map( + participantId -> SignedProtocolMessage.from( + catchUpCmt, + protocolVersion, + Signature.noSignature, + ) + ) + ) + } + + private def isCatchUpPeriod(period: CommitmentPeriod)(implicit + traceContext: TraceContext + ): Future[Boolean] = { + for { + possibleCatchUpCmts <- store.getComputed(period, participantId) + } yield { + val response = possibleCatchUpCmts.nonEmpty && + possibleCatchUpCmts.forall { case (_period, commitment) => + commitment == AcsCommitmentProcessor.emptyCommitment + } + logger.debug( + s"Period $period is a catch-up period $response with the computed catch-up commitments $possibleCatchUpCmts" + ) + response + } + } + private def updateSnapshot(rt: RecordTime, acsChange: AcsChange)(implicit traceContext: TraceContext ): Future[Unit] = { @@ -866,15 +988,21 @@ class AcsCommitmentProcessor( } private def processBuffered( - timestamp: CantonTimestampSecond + timestamp: CantonTimestampSecond, + endExclusive: Boolean, )(implicit traceContext: TraceContext): Future[Unit] = { - logger.debug(s"Processing buffered commitments until $timestamp") + logger.debug(s"Processing buffered commitments until $timestamp ${if (endExclusive) "exclusive" + else "inclusive"}") for { - toProcess <- store.queue.peekThrough(timestamp.forgetRefinement) + toProcessInclusive <- store.queue.peekThrough(timestamp.forgetRefinement) + toProcess = + if (endExclusive) toProcessInclusive.filterNot(c => c.period.toInclusive == timestamp) + else toProcessInclusive _ <- checkMatchAndMarkSafe(toProcess) } yield { logger.debug( - s"Checked buffered remote commitments up to $timestamp and ready to check further ones without buffering" + s"Checked buffered remote commitments up to $timestamp ${if (endExclusive) "exclusive" + else "inclusive"} and ready to check further ones without buffering" ) } } @@ -884,9 +1012,12 @@ class AcsCommitmentProcessor( remote: AcsCommitment, local: Iterable[(CommitmentPeriod, AcsCommitment.CommitmentType)], lastPruningTime: Option[CantonTimestamp], + possibleCatchUp: Boolean = false, )(implicit traceContext: TraceContext): Boolean = { if (local.isEmpty) { - if (lastPruningTime.forall(_ < remote.period.toInclusive.forgetRefinement)) { + if ( + !possibleCatchUp && lastPruningTime.forall(_ < remote.period.toInclusive.forgetRefinement) + ) { // We do not run in an infinite loop of sending empty commitments to each other: // If we receive an empty commitment from a counter-participant, it is because the current participant // sent a commitment to them when they didn't have a shared contract with the current participant @@ -901,6 +1032,9 @@ class AcsCommitmentProcessor( cryptoSnapshot <- domainCrypto.awaitSnapshot( remote.period.toInclusive.forgetRefinement ) + // Due to the condition of this branch, in catch-up mode we don't reply with an empty commitment in between + // catch-up boundaries. If the counter-participant thinks that there is still a shared contract at the + // end of the catch-up boundary, we then reply with an empty commitment. 
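
The marker convention used by `persistCatchUpPeriod` and `isCatchUpPeriod` above can be summarised in isolation: a period counts as a catch-up period exactly when the commitments stored against the participant itself are present and all empty. A minimal sketch, with `StoredCmt` as a hypothetical stand-in for the store's computed-commitment rows and `empty` for `AcsCommitmentProcessor.emptyCommitment`:

    import com.google.protobuf.ByteString

    // Sketch only. Regular commitments always name another participant as counter-participant,
    // so a self-addressed empty commitment cannot collide with normal operation.
    final case class StoredCmt(counterParticipant: String, commitment: ByteString)

    def isCatchUpMarkerPeriod(self: String, rows: Seq[StoredCmt], empty: ByteString): Boolean = {
      val own = rows.filter(_.counterParticipant == self)
      own.nonEmpty && own.forall(_.commitment == empty)
    }
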
msg <- signCommitment( cryptoSnapshot, remote.sender, @@ -913,8 +1047,14 @@ class AcsCommitmentProcessor( ), s"Failed to respond empty ACS back to ${remote.sender}", ) - } else - logger.info(s"Ignoring incoming commitment for a pruned period: $remote") + } else { + if (lastPruningTime.forall(_ >= remote.period.toInclusive.forgetRefinement)) + logger.info(s"Ignoring incoming commitment for a pruned period: $remote") + if (possibleCatchUp) + logger.info( + s"Ignoring incoming commitment for a skipped period due to catch-up: $remote" + ) + } false } else { local.filter(_._2 != remote.commitment) match { @@ -941,71 +1081,122 @@ class AcsCommitmentProcessor( remote.parTraverse_ { cmt => for { commitments <- store.getComputed(cmt.period, cmt.sender) + // check if we were in a catch-up phase + possibleCatchUp <- isCatchUpPeriod(cmt.period) lastPruningTime <- store.pruningStatus _ <- - if (matches(cmt, commitments, lastPruningTime.map(_.timestamp))) { + if (matches(cmt, commitments, lastPruningTime.map(_.timestamp), possibleCatchUp)) { store.markSafe(cmt.sender, cmt.period, sortedReconciliationIntervalsProvider) } else Future.unit } yield () } } + /** Checks whether at the end of `completedPeriod` the commitments with the counter-participants match. In case of + * mismatch, sends fine-grained commitments to enable fine-grained mismatch detection. + * @param lastSentCatchUpCommitmentTimestamp + * @param lastProcessedCatchUpCommitmentTimestamp + * @param completedPeriod + * @param filterInJustMismatches If true, send fine-grained commitments only to counter-participants from whom we + * have mismatching cmts at the catch-up boundary. If false, send fine-grained + * commitments to all counter-participants from whom we don't have cmts, or cmts do + * not match at the catch-up boundary. 
+ */ private def checkMatchAndMarkSafeOrFixDuringCatchUp( lastSentCatchUpCommitmentTimestamp: Option[CantonTimestampSecond], lastProcessedCatchUpCommitmentTimestamp: Option[CantonTimestampSecond], - own: AcsCommitment, + completedPeriod: CommitmentPeriod, cryptoSnapshot: SyncCryptoApi, + filterInJustMismatches: Boolean = false, )(implicit traceContext: TraceContext): Future[Unit] = { - logger.debug(s"Processing own commitment ${own} ") + logger.debug( + s"$participantId checkMatchAndMarkSafeOrFixDuringCatchUp for period $completedPeriod" + ) for { - counterCommitmentList <- store.queue.peekOverlapsForCounterParticipant( - own.period, - own.counterParticipant, - )(traceContext) - lastPruningTime <- store.pruningStatus - - nrIntervals <- sortedReconciliationIntervalsProvider.computeReconciliationIntervalsCovering( - own.period.fromExclusive.forgetRefinement, - own.period.toInclusive.forgetRefinement, + // retrieve commitments computed at catch-up boundary + computed <- store.searchComputedBetween( + completedPeriod.fromExclusive.forgetRefinement, + completedPeriod.toInclusive.forgetRefinement, ) - _ = if (counterCommitmentList.size > nrIntervals.size) { - AcsCommitmentAlarm - .Warn( - s"""There should be at most ${nrIntervals.size} commitments from counter-participant - |${own.counterParticipant} covering the period ${own.period.fromExclusive} to ${own.period.toInclusive}), - |but we have the following ${counterCommitmentList.size}""".stripMargin - ) - .report() - } - // get lists of counter-commitments that match and, respectively, do not match locally computed commitments - (matching, mismatches) = counterCommitmentList.partition(counterCommitment => - matches( - counterCommitment, - List((own.period, own.commitment)), - lastPruningTime.map(_.timestamp), - ) + intervals <- sortedReconciliationIntervalsProvider.computeReconciliationIntervalsCovering( + completedPeriod.fromExclusive.forgetRefinement, + completedPeriod.toInclusive.forgetRefinement, ) - // we mark safe all matching counter-commitments - _ <- matching.parTraverse_ { counterCommitment => - logger.debug(s"Marked as safe commitment $own against counterComm $counterCommitment") - store.markSafe( - counterCommitment.sender, - counterCommitment.period, - sortedReconciliationIntervalsProvider, + _ <- computed.toList.parTraverse_ { case (period, counterParticipant, cmt) => + logger.debug( + s"Processing own commitment ${cmt} for period $period and counter-participant $counterParticipant" ) - } + for { + counterCommitmentList <- store.queue.peekOverlapsForCounterParticipant( + period, + counterParticipant, + )(traceContext) + + lastPruningTime <- store.pruningStatus + + _ = if (counterCommitmentList.size > intervals.size) { + AcsCommitmentAlarm + .Warn( + s"""There should be at most ${intervals.size} commitments from counter-participant + |${counterParticipant} covering the period ${completedPeriod.fromExclusive} to ${completedPeriod.toInclusive}), + |but we have the following ${counterCommitmentList.size}""".stripMargin + ) + .report() + } + + // check if we were in a catch-up phase + possibleCatchUp <- isCatchUpPeriod(period) + + // get lists of counter-commitments that match and, respectively, do not match locally computed commitments + (matching, mismatches) = counterCommitmentList.partition(counterCommitment => + matches( + counterCommitment, + List((period, cmt)), + lastPruningTime.map(_.timestamp), + possibleCatchUp, + ) + ) + + // we mark safe all matching counter-commitments + _ <- matching.parTraverse_ { counterCommitment => + 
logger.debug(s"Marked as safe commitment $cmt against counterComm $counterCommitment") + store.markSafe( + counterCommitment.sender, + counterCommitment.period, + sortedReconciliationIntervalsProvider, + ) + } + + // if there is a mismatch, send all fine-grained commitments between `lastSentCatchUpCommitmentTimestamp` + // and `lastProcessedCatchUpCommitmentTimestamp` + _ <- + if (mismatches.nonEmpty) { + for { + res <- + if (!filterInJustMismatches) { + // send to all counter-participants from whom either I don't have cmts or I have cmts but they don't match + sendCommitmentMessagesInCatchUpInterval( + lastSentCatchUpCommitmentTimestamp, + lastProcessedCatchUpCommitmentTimestamp, + cryptoSnapshot, + filterOutParticipantId = matching.map(c => c.counterParticipant), + ) + } else { + // send to all counter-participants from whom I have cmts but they don't match + sendCommitmentMessagesInCatchUpInterval( + lastSentCatchUpCommitmentTimestamp, + lastProcessedCatchUpCommitmentTimestamp, + cryptoSnapshot, + filterInParticipantId = mismatches.map(c => c.counterParticipant), + filterOutParticipantId = matching.map(c => c.counterParticipant), + ) + } + } yield res + } else Future.unit + } yield () - // if there is a mismatch, send all fine-grained commitments between `lastSentCatchUpCommitmentTimestamp` - // and `lastProcessedCatchUpCommitmentTimestamp` - _ <- mismatches.headOption.traverse_ { counterCommitment => - sendCommitmentMessagesInCatchUpInterval( - lastSentCatchUpCommitmentTimestamp, - lastProcessedCatchUpCommitmentTimestamp, - cryptoSnapshot, - counterCommitment.sender, - ) } } yield () } @@ -1073,9 +1264,10 @@ class AcsCommitmentProcessor( * @return The catch-up timestamp, if the node needs to catch-up, otherwise the given completedPeriodTimestamp. */ private def laggingTooFarBehind( - completedPeriodTimestamp: CantonTimestamp + completedPeriodTimestamp: CantonTimestamp, + config: Option[AcsCommitmentsCatchUpConfig], )(implicit traceContext: TraceContext): Future[CantonTimestamp] = { - acsCommitmentsCatchUpConfig match { + config match { case Some(cfg) => for { sortedReconciliationIntervals <- sortedReconciliationIntervalsProvider @@ -1098,44 +1290,6 @@ class AcsCommitmentProcessor( } } - /** Send the computed commitment messages for all intervals covered by the coarse-grained - * catch-up period between `fromExclusive` to `toInclusive`. - * The caller should ensure that `fromExclusive` and `toInclusive` represent valid reconciliation ticks. - * If `fromExclusive` and/or `toInclusive` are None, they get the value CantonTimestampSecond.MinValue. 
- */ - private def sendCommitmentMessagesInCatchUpInterval( - fromExclusive: Option[CantonTimestampSecond], - toInclusive: Option[CantonTimestampSecond], - cryptoSnapshot: SyncCryptoApi, - pid: ParticipantId, - )(implicit - traceContext: TraceContext - ): Future[Unit] = { - val fromExclusiveSeconds = fromExclusive.getOrElse(CantonTimestampSecond.MinValue) - val toInclusiveSeconds = toInclusive.getOrElse(CantonTimestampSecond.MinValue) - - val period = CommitmentPeriod( - fromExclusiveSeconds, - PositiveSeconds.tryOfSeconds( - toInclusiveSeconds.getEpochSecond - fromExclusiveSeconds.getEpochSecond - ), - ) - for { - cmt <- store.getComputed(period, pid) - _ <- cmt.toList.parTraverse_ { case (period, commitment) => - for { - signedCommitment <- signCommitment(cryptoSnapshot, pid, commitment, period) - _ = logger.debug( - s"Due to mismatch, sending commitment for period $period to counterP $pid" - ) - // TODO(i15333) batch more commitments and handle the case when we reach the maximum message limit. - _ = sendCommitmentMessages(period, Map(pid -> signedCommitment)) - - } yield () - } - } yield () - } - /** Store the computed commitments of the commitment messages */ private def storeCommitments( msgs: Map[ParticipantId, SignedProtocolMessage[AcsCommitment]] @@ -1179,6 +1333,117 @@ class AcsCommitmentProcessor( }.discard } } + + /** Computes and sends commitment messages to counter-participants for all (period,snapshot) pairs in + * [[runningCmtSnapshotsForCatchUp]] for the last catch-up interval. + * The counter-participants are by default all counter-participant at the end of each interval, to which we apply the + * filters `filterInParticipantId` and `filterOutParticipantId`. + * These snapshots should cover the last catch-up interval, namely between `fromExclusive` to `toInclusive`, + * otherwise we throw an [[IllegalStateException]]. + * The caller should ensure that fromExclusive` and `toInclusive` represent valid reconciliation ticks. + * If `fromExclusive` < `toInclusive`, we throw an [[IllegalStateException]]. + * If `fromExclusive` and/or `toInclusive` are None, they get the value CantonTimestampSecond.MinValue. 
+ */ + private def sendCommitmentMessagesInCatchUpInterval( + fromExclusive: Option[CantonTimestampSecond], + toInclusive: Option[CantonTimestampSecond], + cryptoSnapshot: SyncCryptoApi, + filterInParticipantId: Seq[ParticipantId] = Seq.empty, + filterOutParticipantId: Seq[ParticipantId] = Seq.empty, + )(implicit + traceContext: TraceContext + ): Future[Unit] = { + val fromExclusiveSeconds = fromExclusive.getOrElse(CantonTimestampSecond.MinValue) + val toInclusiveSeconds = toInclusive.getOrElse(CantonTimestampSecond.MinValue) + + if (fromExclusiveSeconds > toInclusiveSeconds) + throw new IllegalStateException( + s"$fromExclusive needs to be <= $toInclusive" + ) + + if (runningCmtSnapshotsForCatchUp.isEmpty && fromExclusiveSeconds < toInclusiveSeconds) + throw new IllegalStateException( + s"No active snapshots for catchupInterval [$fromExclusive, $toInclusive], but there should be" + ) + + if (runningCmtSnapshotsForCatchUp.nonEmpty && fromExclusiveSeconds == toInclusiveSeconds) + throw new IllegalStateException( + s"Active snapshots $runningCmtSnapshotsForCatchUp for catchupInterval [$fromExclusive, $toInclusive], but there shouldn't be" + ) + + val sortedPeriods = runningCmtSnapshotsForCatchUp.keySet.toList.sortBy(c => c.toInclusive) + sortedPeriods.headOption.fold(()) { c => + if (c.fromExclusive != fromExclusiveSeconds) + throw new IllegalStateException( + s"Wrong fromExclusive ${c.fromExclusive} in the active commitment snapshots, should be $fromExclusiveSeconds" + ) + else () + } + + sortedPeriods.lastOption.fold(()) { c => + if (c.toInclusive != toInclusiveSeconds) + throw new IllegalStateException( + s"Wrong toInclusive ${c.toInclusive} in the active commitment snapshots, should be $toInclusiveSeconds" + ) + else () + } + + sortedPeriods.sliding(2).foreach { + case Seq(prev, next) => + if (prev.toInclusive != next.fromExclusive) + throw new IllegalStateException( + s"Periods in active snapshots $runningCmtSnapshotsForCatchUp are not consecutive" + ) + case _ => + } + + runningCmtSnapshotsForCatchUp + .map { case (period, snapshot) => + if (period.fromExclusive < fromExclusiveSeconds) + throw new IllegalStateException( + s"Wrong fromExclusive ${period.fromExclusive} in the active commitment snapshots, min is $fromExclusiveSeconds" + ) + if (period.toInclusive > toInclusiveSeconds) + throw new IllegalStateException( + s"Wrong toInclusive ${period.toInclusive} in the active commitment snapshots, max is $toInclusiveSeconds" + ) + + for { + cmts <- commitments( + participantId, + snapshot, + domainCrypto, + period.toInclusive, + Some(metrics), + threadCount, + cachedCommitmentsForRetroactiveSends, + filterInParticipantId, + filterOutParticipantId, + ) + + _ = logger.debug( + s"Due to mismatch, sending commitment for period $period to counterP ${cmts.keySet}" + ) + + msgs <- cmts + .collect { + case (counterParticipant, cmt) if LtHash16.isNonEmptyCommitment(cmt) => + signCommitment(cryptoSnapshot, counterParticipant, cmt, period).map(msg => + (counterParticipant, msg) + ) + } + .toList + .sequence + .map(_.toMap) + _ <- storeCommitments(msgs) + // TODO(i15333) batch more commitments and handle the case when we reach the maximum message limit. 
+ _ = sendCommitmentMessages(period, msgs) + } yield () + } + .toSeq + .sequence_ + } + override protected def onClosed(): Unit = { Lifecycle.close(dbQueue, publishQueue)(logger) } @@ -1188,10 +1453,22 @@ class AcsCommitmentProcessor( // flatMap instead of zip because the `publishQueue` pushes tasks into the `queue`, // so we must call `queue.flush()` only after everything in the `publishQueue` has been flushed. publishQueue.flush().flatMap(_ => dbQueue.flush()) + + private[canton] class AcsCommitmentProcessorHealth( + override val name: String, + override protected val associatedOnShutdownRunner: OnShutdownRunner, + override protected val logger: TracedLogger, + ) extends AtomicHealthComponent { + override protected def initialHealthState: ComponentHealthState = ComponentHealthState.Ok() + override def closingState: ComponentHealthState = + ComponentHealthState.failed(s"Disconnected from domain") + } } object AcsCommitmentProcessor extends HasLoggerName { + val healthName: String = "acs-commitment-processor" + type ProcessorType = ( CantonTimestamp, @@ -1280,12 +1557,12 @@ object AcsCommitmentProcessor extends HasLoggerName { def concatenate( contractHash: LfHash, contractId: LfContractId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ): Array[Byte] = ( contractHash.bytes.toByteString // hash always 32 bytes long per lf.crypto.Hash.underlyingLength concat contractId.encodeDeterministically - concat transferCounter.fold(ByteString.EMPTY)(TransferCounter.encodeDeterministically) + concat TransferCounter.encodeDeterministically(transferCounter) ).toByteArray import com.digitalasset.canton.lfPartyOrdering blocking { @@ -1307,7 +1584,13 @@ object AcsCommitmentProcessor extends HasLoggerName { val sortedStakeholders = SortedSet(stakeholdersAndTransferCounter.stakeholders.toSeq*) val h = commitments.getOrElseUpdate(sortedStakeholders, LtHash16()) - h.remove(concatenate(hash, cid, stakeholdersAndTransferCounter.transferCounter)) + h.remove( + concatenate( + hash, + cid, + stakeholdersAndTransferCounter.transferCounter, + ) + ) loggingContext.debug( s"Removing from commitment deactivation cid $cid transferCounter ${stakeholdersAndTransferCounter.transferCounter}" ) @@ -1401,6 +1684,12 @@ object AcsCommitmentProcessor extends HasLoggerName { } } } + + def clear() = setCachedCommitments( + Map.empty[ParticipantId, AcsCommitment.CommitmentType], + Map.empty[SortedSet[LfPartyId], AcsCommitment.CommitmentType], + Map.empty[ParticipantId, Set[SortedSet[LfPartyId]]], + ) } /** Compute the ACS commitments at the given timestamp. 
@@ -1416,6 +1705,10 @@ object AcsCommitmentProcessor extends HasLoggerName { pruningMetrics: Option[PruningMetrics], parallelism: PositiveNumeric[Int], cachedCommitments: CachedCommitments, + // if non-empty, compute commitments only for the counter-participants in filterInParticipantIds; otherwise compute them for all counter-participants + filterInParticipantIds: Seq[ParticipantId] = Seq.empty, + // if non-empty, exclude the counter-participants in filterOutParticipantIds from the computation; otherwise exclude no one + filterOutParticipantIds: Seq[ParticipantId] = Seq.empty, )(implicit ec: ExecutionContext, traceContext: TraceContext, @@ -1431,7 +1724,17 @@ object AcsCommitmentProcessor extends HasLoggerName { parallelism, ) } yield { - val res = computeCommitmentsPerParticipant(byParticipant, cachedCommitments) + // compute commitments only for the requested counter-participants, if a filter is set, otherwise for all counter-participants + val includeCPs = + if (filterInParticipantIds.isEmpty) byParticipant.keys + else filterInParticipantIds + val finalCPs = includeCPs.toSet.diff(filterOutParticipantIds.toSet) + val res = computeCommitmentsPerParticipant( + byParticipant.filter { case (pid, _) => + finalCPs.contains(pid) + }, + cachedCommitments, + ) commitmentTimer.foreach(_.stop()) // update cached commitments cachedCommitments.setCachedCommitments( @@ -1702,7 +2005,7 @@ object AcsCommitmentProcessor extends HasLoggerName { } def lookupChangeMetadata( - activations: Map[LfContractId, TransferCounterO] + activations: Map[LfContractId, TransferCounter] ): Future[AcsChange] = { for { // TODO(i9270) extract magic numbers @@ -1734,7 +2037,10 @@ object AcsCommitmentProcessor extends HasLoggerName { namedLoggingContext.traceContext ) activations = activeContracts.map { case (cid, (toc, transferCounter)) => - (cid, transferCounter) + ( + cid, + transferCounter, + ) } change <- lookupChangeMetadata(activations) } yield { @@ -1831,5 +2137,45 @@ object AcsCommitmentProcessor extends HasLoggerName { final case class Warn(override val cause: String) extends Alarm(cause) } } + + trait AcsCommitmentDegradation extends CantonError + object DegradationError extends ErrorGroup { + + @Explanation( + "The participant is configured to engage catch-up mode, however the configuration is ineffective and catch-up will bring no improvement" + ) + @Resolution("Please update the catch-up configuration to use a catchUpIntervalSkip higher than 1") + object AcsCommitmentDegradationWithIneffectiveConfig + extends ErrorCode( + id = "ACS_COMMITMENT_DEGRADATION_WITH_INEFFECTIVE_CONFIG", + ErrorCategory.BackgroundProcessDegradationWarning, + ) { + final case class Report()(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "The participant has activated catch-up mode, however catchUpIntervalSkip is set to 1, so it brings no improvement." + ) + with AcsCommitmentDegradation + } + + @Explanation( + "The participant has detected that ACS commitment computation is taking too long and is trying to catch up." + ) + @Resolution("Catch-up mode is enabled and the participant should recover on its own.") + object AcsCommitmentDegradation + extends ErrorCode( + id = "ACS_COMMITMENT_DEGRADATION", + ErrorCategory.BackgroundProcessDegradationWarning, + ) { + final case class Report()(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = + "The participant has activated ACS catch-up mode to address the commitment computation lag." 
+ ) + with AcsCommitmentDegradation + } + } } } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActivationsDeactivationsConsistencyCheck.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActivationsDeactivationsConsistencyCheck.scala new file mode 100644 index 000000000..92189df05 --- /dev/null +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActivationsDeactivationsConsistencyCheck.scala @@ -0,0 +1,157 @@ +// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +// SPDX-License-Identifier: Apache-2.0 + +package com.digitalasset.canton.participant.store + +import cats.syntax.functorFilter.* +import com.daml.nonempty.NonEmpty +import com.digitalasset.canton.participant.store.ActiveContractStore.ActivenessChangeDetail.{ + Add, + Archive, + Create, + Purge, +} +import com.digitalasset.canton.participant.store.ActiveContractStore.{ + AcsWarning, + ActivenessChangeDetail, + ChangeAfterArchival, + ChangeBeforeCreation, + ChangeType, + DoubleContractArchival, + DoubleContractCreation, +} +import com.digitalasset.canton.participant.util.TimeOfChange +import com.digitalasset.canton.protocol.LfContractId + +import scala.annotation.tailrec + +@SuppressWarnings(Array("org.wartremover.warts.Var")) +object ActivationsDeactivationsConsistencyCheck { + + /** Checks whether a new change is consistent with previous changes + * @param cid ID of the contract + * @param toc Time of change of the new change + * @param changes All the changes, ordered by time of change. If two changes have the same toc, + * the activation should come before the deactivation. + * @return List of issues. + */ + def apply( + cid: LfContractId, + toc: TimeOfChange, + changes: NonEmpty[Seq[(TimeOfChange, ActivenessChangeDetail)]], + ): List[AcsWarning] = { + var latestCreateO: Option[TimeOfChange] = None + var earliestArchivalO: Option[TimeOfChange] = None + var latestArchivalOrPurgeO: Option[TimeOfChange] = None + + /* + We only generate warnings that relate to the new changes. Also, since the order of the two + time of change in the AcsWarning matters, we potentially need to reorder + */ + def existingToc(prevToc: TimeOfChange, currentToc: TimeOfChange) = + if (prevToc == toc) List(currentToc) + else if (currentToc == toc) List(prevToc) + else Nil + + def doubleContractArchival(prevArchival: TimeOfChange, currentToc: TimeOfChange) = { + existingToc(prevArchival, currentToc).map(DoubleContractArchival(cid, _, toc)) + } + + def doubleContractCreation(prevToc: TimeOfChange, currentToc: TimeOfChange) = + existingToc(prevToc, currentToc).map(DoubleContractCreation(cid, _, toc)) + + def updateStateVariables(change: ActivenessChangeDetail, toc: TimeOfChange): Unit = + change match { + case _: Create => latestCreateO = Some(toc) + case Archive => + if (earliestArchivalO.isEmpty) earliestArchivalO = Some(toc) + + latestArchivalOrPurgeO = Some(toc) + case Purge => latestArchivalOrPurgeO = Some(toc) + case _ => () + } + + /* + Checks whether change are consistent. Returns the first errors. 
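
As a usage illustration of the new check, feeding it two creations of the same contract at different times should surface a `DoubleContractCreation` warning. This is a sketch under the assumption that fixtures `cid: LfContractId`, `tc: TransferCounter` and `toc1`, `toc2: TimeOfChange` (with `toc1` earlier than `toc2`) are available, e.g. from the surrounding test utilities:

    import com.daml.nonempty.NonEmpty
    import com.digitalasset.canton.participant.store.ActiveContractStore.{
      ActivenessChangeDetail,
      DoubleContractCreation,
    }

    // Sketch only: two creations of the same contract at different times of change.
    val changes = NonEmpty(
      Seq,
      toc1 -> (ActivenessChangeDetail.Create(tc): ActivenessChangeDetail),
      toc2 -> (ActivenessChangeDetail.Create(tc): ActivenessChangeDetail),
    )
    val warnings = ActivationsDeactivationsConsistencyCheck(cid, toc2, changes)
    assert(warnings.contains(DoubleContractCreation(cid, toc1, toc2)))
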
+ */ + @tailrec + def check( + changes: Iterator[(TimeOfChange, ActivenessChangeDetail)], + prevState: (TimeOfChange, ActivenessChangeDetail), + ): List[AcsWarning] = { + val (prevToc, prevChange) = prevState + + val isActive = prevChange.changeType match { + case ChangeType.Activation => true + case ChangeType.Deactivation => false + } + val isInactive = !isActive + + if (changes.hasNext) { + val (currentToc, currentChange) = changes.next() + + val doubleArchival = currentChange match { + case Archive => + earliestArchivalO.toList.flatMap(doubleContractArchival(_, currentToc)) + + case _ => Nil + } + + val changeAfterArchivalO = earliestArchivalO.toList + .map(ChangeAfterArchival(cid, _, currentToc)) + .filter(_.timeOfChanges.contains(toc)) + + val doubleCreation = currentChange match { + case Create(_) => + latestCreateO.toList.flatMap(doubleContractCreation(_, currentToc)) + case _ => Nil + } + + val changeBeforeCreation = currentChange match { + case _: Create => + existingToc(prevToc, currentToc).map(ChangeBeforeCreation(cid, _, toc)) + case _ => Nil + } + + val addPurge = currentChange match { + case _: Add if isActive => doubleContractCreation(prevToc, currentToc) + case Purge if isInactive => + latestArchivalOrPurgeO.toList.flatMap(doubleContractArchival(_, currentToc)) + + case _ => Nil + } + + updateStateVariables(currentChange, currentToc) + val warnings = + addPurge ++ doubleCreation ++ doubleArchival ++ changeAfterArchivalO ++ changeBeforeCreation + + if (warnings.nonEmpty) + warnings + else + check(changes, (currentToc, currentChange)) + } else + Nil + } + + val (firstToc, firstChange) = changes.head1 + updateStateVariables(firstChange, firstToc) + + val warnings = check(changes.tail1.iterator, changes.head1) + + val containsDoubleCreation = warnings.collectFirst { case _: DoubleContractCreation => + () + }.isDefined + + val containsDoubleArchival = warnings.collectFirst { case _: DoubleContractArchival => + () + }.isDefined + + val filteredWarnings = warnings.mapFilter { + case c: ChangeBeforeCreation => Option.when(!containsDoubleCreation)(c) + case c: ChangeAfterArchival => Option.when(!containsDoubleArchival)(c) + case other => Some(other) + } + + filteredWarnings + } +} diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala index cbac7008a..9404512be 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/ActiveContractStore.scala @@ -3,27 +3,28 @@ package com.digitalasset.canton.participant.store -import cats.kernel.Order import cats.syntax.foldable.* +import cats.syntax.parallel.* import com.daml.lf.data.Ref.PackageId +import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String100, String36} import com.digitalasset.canton.data.CantonTimestamp +import com.digitalasset.canton.logging.ErrorLoggingContext import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange import com.digitalasset.canton.participant.util.{StateChange, TimeOfChange} -import com.digitalasset.canton.protocol.{ - LfContractId, - SourceDomainId, - TargetDomainId, - TransferDomainId, -} +import com.digitalasset.canton.protocol.{LfContractId, SourceDomainId, 
TargetDomainId} +import com.digitalasset.canton.store.db.DbDeserializationException +import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore} import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.{Checked, CheckedT} -import com.digitalasset.canton.{RequestCounter, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{RequestCounter, TransferCounter} import com.google.common.annotations.VisibleForTesting +import slick.jdbc.{GetResult, SetParameter} import scala.collection.immutable.SortedMap -import scala.concurrent.Future +import scala.concurrent.{ExecutionContext, Future} /**

The active contract store (ACS) stores for every contract ID * whether it is inexistent, [[ActiveContractStore.Active]], @@ -50,7 +51,7 @@ import scala.concurrent.Future * If the future returned by a call completes and observing the completion happens before another call, * then all changes of the former call must be ordered before all changes of the later call.

* - *

Bulk methods like [[ActiveContractStore.markContractsActive]] and [[ActiveContractStore.archiveContracts]] + *

Bulk methods like [[ActiveContractStore.markContractsCreated]] and [[ActiveContractStore.archiveContracts]] * generate one individual change for each contract. * So their changes may be interleaved with other calls.

* @@ -62,6 +63,7 @@ trait ActiveContractStore import ActiveContractStore.* override protected def kind: String = "active contract journal entries" + private[store] def indexedStringStore: IndexedStringStore /** Marks the given contracts as active from `timestamp` (inclusive) onwards. * @@ -82,15 +84,40 @@ trait ActiveContractStore *
  • [[ActiveContractStore.ChangeAfterArchival]] if this creation is later than the earliest archival of the contract.
  • * */ - def markContractsActive(contracts: Seq[(LfContractId, TransferCounterO)], toc: TimeOfChange)( + def markContractsCreated(contracts: Seq[(LfContractId, TransferCounter)], toc: TimeOfChange)( implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + markContractsCreatedOrAdded(contracts, toc: TimeOfChange, isCreation = true) + + /** Shorthand for `markContractsCreated(Seq(contract), toc)` */ + def markContractCreated(contract: (LfContractId, TransferCounter), toc: TimeOfChange)(implicit + traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + markContractsCreatedOrAdded(Seq(contract), toc, isCreation = true) - /** Shorthand for `markContractsActive(Seq(contractId), toc)` */ - def markContractActive(contract: (LfContractId, TransferCounterO), toc: TimeOfChange)(implicit + /** Shorthand for `markContractAdded(Seq(contract), toc)` */ + def markContractAdded(contract: (LfContractId, TransferCounter), toc: TimeOfChange)(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = - markContractsActive(Seq(contract), toc) + markContractsAdded(Seq(contract), toc: TimeOfChange) + + /** Marks the given contracts as active from `timestamp` (inclusive) onwards. + * + * Unlike creation, add can be done several times in the life of a contract. + * It is intended to use from the repair service. + */ + def markContractsAdded(contracts: Seq[(LfContractId, TransferCounter)], toc: TimeOfChange)( + implicit traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + markContractsCreatedOrAdded(contracts, toc: TimeOfChange, isCreation = false) + + protected def markContractsCreatedOrAdded( + contracts: Seq[(LfContractId, TransferCounter)], + toc: TimeOfChange, + isCreation: Boolean, // true if create, false if add + )(implicit + traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] /** Marks the given contracts as archived from `toc`'s timestamp (inclusive) onwards. * @@ -124,14 +151,41 @@ trait ActiveContractStore */ def archiveContracts(contractIds: Seq[LfContractId], toc: TimeOfChange)(implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + purgeOrArchiveContracts(contractIds, toc, isArchival = true) - /** Shorthand for `archiveContracts(Seq(contractId), toc)` */ + /** Shorthand for `archiveContracts(Seq(cid), toc)` */ def archiveContract(cid: LfContractId, toc: TimeOfChange)(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = archiveContracts(Seq(cid), toc) + /** Shorthand for `purgeContracts(Seq(cid), toc)` */ + def purgeContract(cid: LfContractId, toc: TimeOfChange)(implicit + traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + purgeOrArchiveContracts(Seq(cid), toc, isArchival = false) + + /** Marks the given contracts as inactive from `timestamp` (inclusive) onwards. + * + * Unlike archival, purge can be done several times in the life of a contract. + * It is intended to use from the repair service. 
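
The repair-oriented entry points introduced here can be exercised as in the following sketch (assuming an `acs: ActiveContractStore`, a contract id `cid`, a transfer counter `tc`, two times of change `toc1`/`toc2`, and an implicit execution context in scope):

    // Sketch only: unlike create/archive, add and purge may happen several times in a
    // contract's life, which is what the repair service relies on.
    val added  = acs.markContractAdded(cid -> tc, toc1) // routed to markContractsCreatedOrAdded(isCreation = false)
    val purged = acs.purgeContract(cid, toc2)           // routed to purgeOrArchiveContracts(isArchival = false)
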
+ */ + def purgeContracts(contractIds: Seq[LfContractId], toc: TimeOfChange)(implicit + traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + purgeOrArchiveContracts(contractIds, toc, isArchival = false) + + /** Depending on the `isArchival`, will archive (effect of a Daml transaction) or purge (repair service) + */ + protected def purgeOrArchiveContracts( + contractIds: Seq[LfContractId], + toc: TimeOfChange, + isArchival: Boolean, + )(implicit + traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] + /** Returns the latest [[com.digitalasset.canton.participant.store.ActiveContractStore.Status]] * for the given contract IDs along with its [[com.digitalasset.canton.participant.util.TimeOfChange]]. * @@ -166,14 +220,14 @@ trait ActiveContractStore * */ def transferInContracts( - transferIns: Seq[(LfContractId, SourceDomainId, TransferCounterO, TimeOfChange)] + transferIns: Seq[(LfContractId, SourceDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] def transferInContract( contractId: LfContractId, toc: TimeOfChange, sourceDomain: SourceDomainId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = @@ -193,14 +247,14 @@ trait ActiveContractStore * */ def transferOutContracts( - transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounterO, TimeOfChange)] + transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] def transferOutContract( contractId: LfContractId, toc: TimeOfChange, targetDomain: TargetDomainId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = @@ -240,6 +294,32 @@ trait ActiveContractStore private[participant] def contractCount(timestamp: CantonTimestamp)(implicit traceContext: TraceContext ): Future[Int] + + protected def domainIdFromIdx( + idx: Int + )(implicit ec: ExecutionContext, loggingContext: ErrorLoggingContext): Future[DomainId] = + IndexedDomain + .fromDbIndexOT("par_active_contracts remote domain index", indexedStringStore)(idx) + .map(_.domainId) + .value + .flatMap { + case Some(domainId) => Future.successful(domainId) + case None => + Future.failed( + new RuntimeException(s"Unable to find domain ID for domain with index $idx") + ) + } + + protected def getDomainIndices( + domains: Seq[DomainId] + ): CheckedT[Future, AcsError, AcsWarning, Map[DomainId, IndexedDomain]] = + CheckedT.result( + domains + .parTraverse { domainId => + IndexedDomain.indexed(indexedStringStore)(domainId).map(domainId -> _) + } + .map(_.toMap) + ) } object ActiveContractStore { @@ -247,61 +327,205 @@ object ActiveContractStore { type ContractState = StateChange[Status] val ContractState: StateChange.type = StateChange - sealed trait ActivenessChangeDetail extends Product with Serializable { - def unwrap: Option[DomainId] - def isTransfer: Boolean + sealed trait ChangeType { + def name: String - val transferCounter: TransferCounterO + // lazy val so that `kind` is initialized first in the subclasses + final lazy val toDbPrimitive: String100 = + // The Oracle DB schema allows up to 100 chars; Postgres, H2 map this to an enum + String100.tryCreate(name) } - final case class TransferDetails( - remoteDomainId: DomainId, - transferCounter: 
TransferCounterO, - ) extends ActivenessChangeDetail { - override def unwrap: Option[DomainId] = Some(remoteDomainId) - override def isTransfer: Boolean = true + object ChangeType { + case object Activation extends ChangeType { + override val name = "activation" + } + + case object Deactivation extends ChangeType { + override val name = "deactivation" + } + + implicit val setParameterChangeType: SetParameter[ChangeType] = (v, pp) => pp >> v.toDbPrimitive + implicit val getResultChangeType: GetResult[ChangeType] = GetResult(r => + r.nextString() match { + case ChangeType.Activation.name => ChangeType.Activation + case ChangeType.Deactivation.name => ChangeType.Deactivation + case unknown => throw new DbDeserializationException(s"Unknown change type [$unknown]") + } + ) } - object TransferDetails { - def apply(domain: TransferDomainId, transferCounter: TransferCounterO): TransferDetails = - TransferDetails(domain.unwrap, transferCounter) + sealed trait ActivenessChangeDetail extends Product with Serializable { + def name: LengthLimitedString + + def transferCounterO: Option[TransferCounter] + def remoteDomainIdxO: Option[Int] + + def changeType: ChangeType + def contractChange: ContractChange + + def isTransfer: Boolean } object ActivenessChangeDetail { + val create: String36 = String36.tryCreate("create") + val archive: String36 = String36.tryCreate("archive") + val add: String36 = String36.tryCreate("add") + val purge: String36 = String36.tryCreate("purge") + val transferIn: String36 = String36.tryCreate("transfer-in") + val transferOut: String36 = String36.tryCreate("transfer-out") + + sealed trait HasTransferCounter extends ActivenessChangeDetail { + def transferCounter: TransferCounter + override def transferCounterO: Option[TransferCounter] = Some(transferCounter) + def toStateChangeType: StateChangeType = StateChangeType(contractChange, transferCounter) + } + + sealed trait TransferChangeDetail extends HasTransferCounter { + def toTransferType: ActiveContractStore.TransferType + def remoteDomainIdx: Int + override def remoteDomainIdxO: Option[Int] = Some(remoteDomainIdx) + + override def isTransfer: Boolean = true + } + + final case class Create(transferCounter: TransferCounter) extends HasTransferCounter { + override val name = ActivenessChangeDetail.create + override def transferCounterO: Option[TransferCounter] = Some(transferCounter) + + override def remoteDomainIdxO: Option[Int] = None + + override def changeType: ChangeType = ChangeType.Activation + + override def contractChange: ContractChange = ContractChange.Created + + override def isTransfer: Boolean = false + } + + final case class Add(transferCounter: TransferCounter) extends HasTransferCounter { + override val name = ActivenessChangeDetail.add + override def remoteDomainIdxO: Option[Int] = None + + override def changeType: ChangeType = ChangeType.Activation + + override def contractChange: ContractChange = ContractChange.Created + + override def isTransfer: Boolean = false + } + + /** The transfer counter for archivals stored in the acs is always None, because we cannot + * determine the correct transfer counter when the contract is archived. + * We only determine the transfer counter later, when the record order publisher triggers the + * computation of acs commitments, but we never store it in the acs. 
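
Each `ActivenessChangeDetail` variant is persisted as an operation name plus an optional transfer counter and an optional remote domain index; the `SetParameter`/`GetResult` instances that follow implement exactly that triple. A toy, string-based decoder sketch of the same convention (hypothetical helper, not the patch's code):

    // Sketch only: row layout (name, transferCounterO, remoteDomainIdxO) -> operation.
    def describeRow(name: String, transferCounterO: Option[Long], remoteDomainIdxO: Option[Int]): String =
      (name, transferCounterO, remoteDomainIdxO) match {
        case ("create", Some(tc), _)               => s"Create(transferCounter = $tc)"
        case ("add", Some(tc), _)                  => s"Add(transferCounter = $tc)"
        case ("archive", _, _)                     => "Archive"
        case ("purge", _, _)                       => "Purge"
        case ("transfer-in", Some(tc), Some(idx))  => s"TransferIn($tc, remoteDomainIdx = $idx)"
        case ("transfer-out", Some(tc), Some(idx)) => s"TransferOut($tc, remoteDomainIdx = $idx)"
        case other                                 => s"malformed activeness change row: $other"
      }
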
+ */ + case object Archive extends ActivenessChangeDetail { + override val name = ActivenessChangeDetail.archive + override def transferCounterO: Option[TransferCounter] = None + override def remoteDomainIdxO: Option[Int] = None + override def changeType: ChangeType = ChangeType.Deactivation + + override def contractChange: ContractChange = ContractChange.Archived + + override def isTransfer: Boolean = false + } + + case object Purge extends ActivenessChangeDetail { + override val name = ActivenessChangeDetail.purge + override def transferCounterO: Option[TransferCounter] = None + + override def remoteDomainIdxO: Option[Int] = None + + override def changeType: ChangeType = ChangeType.Deactivation + + override def contractChange: ContractChange = ContractChange.Created + + override def isTransfer: Boolean = false + } + + final case class TransferIn(transferCounter: TransferCounter, remoteDomainIdx: Int) + extends TransferChangeDetail { + override val name = ActivenessChangeDetail.transferIn + + override def changeType: ChangeType = ChangeType.Activation + override def toTransferType: ActiveContractStore.TransferType = + ActiveContractStore.TransferType.TransferIn + + override def contractChange: ContractChange = ContractChange.TransferredIn + } + + final case class TransferOut(transferCounter: TransferCounter, remoteDomainIdx: Int) + extends TransferChangeDetail { + override val name = ActivenessChangeDetail.transferOut + + override def changeType: ChangeType = ChangeType.Deactivation + override def toTransferType: ActiveContractStore.TransferType = + ActiveContractStore.TransferType.TransferOut + + override def contractChange: ContractChange = ContractChange.TransferredOut + } - def apply( - domainIdO: Option[DomainId], - transferCounter: TransferCounterO, - ): ActivenessChangeDetail = - domainIdO match { - case None => CreationArchivalDetail(transferCounter) - case Some(domainId) => TransferDetails(domainId, transferCounter) + implicit val setParameterActivenessChangeDetail: SetParameter[ActivenessChangeDetail] = + (v, pp) => { + pp >> v.name + pp >> v.transferCounterO + pp >> v.remoteDomainIdxO } - private[store] implicit val orderForActivenessChangeDetail: Order[ActivenessChangeDetail] = - Order.by[ActivenessChangeDetail, Option[DomainId]](_.unwrap) - } + implicit val getResultChangeType: GetResult[ActivenessChangeDetail] = GetResult { r => + val operationName = r.nextString() + val transferCounterO = GetResult[Option[TransferCounter]].apply(r) + val remoteDomainO = r.nextIntOption() + + if (operationName == ActivenessChangeDetail.create.str) { + val transferCounter = transferCounterO.getOrElse( + throw new DbDeserializationException("transfer counter should be defined for a create") + ) + + ActivenessChangeDetail.Create(transferCounter) + } else if (operationName == ActivenessChangeDetail.archive.str) { + ActivenessChangeDetail.Archive + } else if (operationName == ActivenessChangeDetail.add.str) { + val transferCounter = transferCounterO.getOrElse( + throw new DbDeserializationException("transfer counter should be defined for an add") + ) + + ActivenessChangeDetail.Add(transferCounter) + } else if (operationName == ActivenessChangeDetail.purge.str) { + ActivenessChangeDetail.Purge + } else if (operationName == "transfer-in" || operationName == "transfer-out") { + val transferCounter = transferCounterO.getOrElse( + throw new DbDeserializationException( + s"transfer counter should be defined for a $operationName" + ) + ) - /** The transfer counter for archivals stored in the acs is 
always None, because we cannot - * determine the correct transfer counter when the contract is archived. - * We only determine the transfer counter later, when the record order publisher triggers the - * computation of acs commitments, but we never store it in the acs. - */ - final case class CreationArchivalDetail(transferCounter: TransferCounterO) - extends ActivenessChangeDetail { - override def unwrap: None.type = None + val remoteDomain = remoteDomainO.getOrElse( + throw new DbDeserializationException( + s"remote domain should be defined for a $operationName" + ) + ) - override def isTransfer: Boolean = false + if (operationName == ActivenessChangeDetail.transferIn.str) + ActivenessChangeDetail.TransferIn(transferCounter, remoteDomain) + else + ActivenessChangeDetail.TransferOut(transferCounter, remoteDomain) + } else throw new DbDeserializationException(s"Unknown operation type [$operationName]") + } } sealed trait AcsBaseError extends Product with Serializable /** Warning cases returned by the operations on the [[ActiveContractStore!]] */ - sealed trait AcsWarning extends AcsBaseError + sealed trait AcsWarning extends AcsBaseError { + // List of toc involved in the error + def timeOfChanges: List[TimeOfChange] + } /** Error cases returned by the operations on the [[ActiveContractStore!]] */ trait AcsError extends AcsBaseError + final case class UnableToFindIndex(id: DomainId) extends AcsError + final case class ActiveContractsDataInvariantViolation( errorMessage: String ) extends AcsError @@ -312,7 +536,9 @@ object ActiveContractStore { toc: TimeOfChange, detail1: ActivenessChangeDetail, detail2: ActivenessChangeDetail, - ) extends AcsWarning + ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(toc) + } /** A contract is simultaneously archived and/or transferred out to possibly several source domains */ final case class SimultaneousDeactivation( @@ -320,35 +546,46 @@ object ActiveContractStore { toc: TimeOfChange, detail1: ActivenessChangeDetail, detail2: ActivenessChangeDetail, - ) extends AcsWarning + ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(toc) + } /** The given contract is archived a second time, but with a different time of change. */ final case class DoubleContractArchival( contractId: LfContractId, oldTime: TimeOfChange, newTime: TimeOfChange, - ) extends AcsWarning + ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(oldTime, newTime) + } /** The given contract is created a second time, but with a different time of change. */ final case class DoubleContractCreation( contractId: LfContractId, oldTime: TimeOfChange, newTime: TimeOfChange, - ) extends AcsWarning + ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(oldTime, newTime) + + } /** The state of a contract is changed before its `creation`. */ final case class ChangeBeforeCreation( contractId: LfContractId, creation: TimeOfChange, change: TimeOfChange, - ) extends AcsWarning + ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(creation, change) + } /** The state of a contract is changed after its `archival`. */ final case class ChangeAfterArchival( contractId: LfContractId, archival: TimeOfChange, change: TimeOfChange, - ) extends AcsWarning + ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(archival, change) + } /** TransferCounter should increase monotonically with the time of change. 
*/ final case class TransferCounterShouldIncrease( @@ -359,6 +596,8 @@ object ActiveContractStore { nextToc: TimeOfChange, strict: Boolean, ) extends AcsWarning { + override def timeOfChanges: List[TimeOfChange] = List(currentToc, nextToc) + def reason: String = s"""The transfer counter $current of the contract state at $currentToc should be smaller than ${if ( strict @@ -375,25 +614,28 @@ object ActiveContractStore { case Active(_) => true case _ => false } - - val transferCounter: TransferCounterO } /** The contract has been created and is active. */ - final case class Active(transferCounter: TransferCounterO) extends Status { + final case class Active(tc: TransferCounter) extends Status { override def prunable: Boolean = false override def pretty: Pretty[Active] = prettyOfClass( - paramIfDefined("transfer counter", _.transferCounter) + param("transfer counter", _.transferCounter) ) + def transferCounter: TransferCounter = tc } /** The contract has been archived and it is not active. */ case object Archived extends Status { override def prunable: Boolean = true override def pretty: Pretty[Archived.type] = prettyOfObject[Archived.type] - override val transferCounter: TransferCounterO = - None // transfer counters remain None, because we do not write them back to the acs + // transfer counter remains None, because we do not write it back to the ACS + } + + case object Purged extends Status { + override def prunable: Boolean = true + override def pretty: Pretty[Purged.type] = prettyOfObject[Purged.type] } /** The contract has been transferred out to the given `targetDomain` after it had resided on this domain. @@ -406,11 +648,11 @@ object ActiveContractStore { *
  • The contract is active or archived on any other domain.
  • * * - * @param transferCounter If defined, this is the transfer counter of the transfer-out request that transferred the contract away. + * @param transferCounter The transfer counter of the transfer-out request that transferred the contract away. */ final case class TransferredAway( targetDomain: TargetDomainId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ) extends Status { override def prunable: Boolean = true override def pretty: Pretty[TransferredAway] = prettyOfClass(unnamedParam(_.targetDomain)) @@ -429,7 +671,7 @@ object ActiveContractStore { } private[store] final case class TransferCounterAtChangeInfo( timeOfChange: TimeOfChange, - transferCounter: TransferCounterO, + transferCounter: Option[TransferCounter], ) private[store] def checkTransferCounterAgainstLatestBefore( @@ -437,7 +679,6 @@ object ActiveContractStore { timeOfChange: TimeOfChange, transferCounter: TransferCounter, latestBeforeO: Option[TransferCounterAtChangeInfo], - transferType: TransferType, ): Checked[Nothing, TransferCounterShouldIncrease, Unit] = latestBeforeO.flatMap { latestBefore => latestBefore.transferCounter.map { previousTransferCounter => @@ -511,7 +752,7 @@ trait ActiveContractSnapshot { */ def snapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]] + ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]] /** Returns all contracts that were active right after the given request counter, * and when the contract became active for the last time before or at the given request counter. @@ -533,7 +774,7 @@ trait ActiveContractSnapshot { */ def snapshot(rc: RequestCounter)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounterO)]] + ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounter)]] /** Returns Some(contractId) if an active contract belonging to package `pkg` exists, otherwise returns None. * The returned contractId may be any active contract from package `pkg`. @@ -560,18 +801,20 @@ trait ActiveContractSnapshot { ): Future[Map[LfContractId, CantonTimestamp]] /** Returns a map to the latest transfer counter of the contract before the given request counter. - * If the contract does not exist in the ACS, it returns a None. + * Fails if not all given contract ids are active in the ACS, or if the ACS does not have defined their latest transfer counter. * * @param requestCounter The request counter *immediately before* which the state of the contracts shall be determined. * - * @throws java.lang.IllegalArgumentException if `requestCounter` is equal to RequestCounter.MinValue`. + * @throws java.lang.IllegalArgumentException if `requestCounter` is equal to RequestCounter.MinValue`, + * if not all given contract ids are active in the ACS, + * if the ACS does not contain the latest transfer counter for each given contract id. 
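
`TransferCounterShouldIncrease` and `checkTransferCounterAgainstLatestBefore` above encode a simple monotonicity rule over times of change. As a plain sketch (bare `Long` counters, with `strict` deciding whether equality is allowed):

    // Sketch only: a new transfer counter is acceptable relative to the latest counter recorded
    // before it if it is strictly larger (strict = true) or at least not smaller (strict = false),
    // depending on the kind of neighbouring change.
    def transferCounterAcceptable(previousO: Option[Long], current: Long, strict: Boolean): Boolean =
      previousO.forall(previous => if (strict) current > previous else current >= previous)
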
*/ def bulkContractsTransferCounterSnapshot( contractIds: Set[LfContractId], requestCounter: RequestCounter, )(implicit traceContext: TraceContext - ): Future[Map[LfContractId, TransferCounterO]] + ): Future[Map[LfContractId, TransferCounter]] /** Returns all changes to the active contract set between the two timestamps * (exclusive lower bound timestamp, inclusive upper bound timestamp) @@ -622,15 +865,19 @@ sealed trait ContractChange extends Product with Serializable with PrettyPrintin object ContractChange { case object Created extends ContractChange case object Archived extends ContractChange - case object Unassigned extends ContractChange - case object Assigned extends ContractChange + case object Purged extends ContractChange + case object TransferredOut extends ContractChange + case object TransferredIn extends ContractChange } /** Type of state change of a contract as returned by [[com.digitalasset.canton.participant.store.ActiveContractStore.changesBetween]] * through a [[com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange]] */ -final case class StateChangeType(change: ContractChange, transferCounter: TransferCounterO) +final case class StateChangeType(change: ContractChange, transferCounter: TransferCounter) extends PrettyPrinting { override def pretty: Pretty[StateChangeType] = - prettyOfClass(param("", _.change), paramIfDefined("", _.transferCounter)) + prettyOfClass( + param("operation", _.change), + param("transfer counter", _.transferCounter), + ) } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala index 8e44ff7d2..8d9766903 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/SyncDomainPersistentState.scala @@ -14,8 +14,8 @@ import com.digitalasset.canton.crypto.{Crypto, CryptoPureApi} import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.config.ParticipantStoreConfig import com.digitalasset.canton.participant.store.EventLogId.DomainEventLogId -import com.digitalasset.canton.participant.store.db.DbSyncDomainPersistentStateX -import com.digitalasset.canton.participant.store.memory.InMemorySyncDomainPersistentStateX +import com.digitalasset.canton.participant.store.db.DbSyncDomainPersistentState +import com.digitalasset.canton.participant.store.memory.InMemorySyncDomainPersistentState import com.digitalasset.canton.resource.{DbStorage, MemoryStorage, Storage} import com.digitalasset.canton.store.* import com.digitalasset.canton.time.Clock @@ -75,19 +75,20 @@ object SyncDomainPersistentState { val domainLoggerFactory = loggerFactory.append("domainId", domainId.domainId.toString) storage match { case _: MemoryStorage => - new InMemorySyncDomainPersistentStateX( + new InMemorySyncDomainPersistentState( clock, crypto, domainId, protocolVersion, enableAdditionalConsistencyChecks, topologyXConfig.enableTopologyTransactionValidation, + indexedStringStore, domainLoggerFactory, processingTimeouts, futureSupervisor, ) case db: DbStorage => - new DbSyncDomainPersistentStateX( + new DbSyncDomainPersistentState( domainId, protocolVersion, clock, diff --git 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/data/ActiveContractsData.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/data/ActiveContractsData.scala index 51857f8b1..867164c14 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/data/ActiveContractsData.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/data/ActiveContractsData.scala @@ -4,12 +4,12 @@ package com.digitalasset.canton.participant.store.data import cats.syntax.either.* -import com.digitalasset.canton.TransferCounterO +import com.digitalasset.canton.TransferCounter import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.LfContractId import com.digitalasset.canton.version.ProtocolVersion -final case class ActiveContractData(contractId: LfContractId, transferCounter: TransferCounterO) +final case class ActiveContractData(contractId: LfContractId, transferCounter: TransferCounter) final case class ActiveContractsData private ( protocolVersion: ProtocolVersion, @@ -17,15 +17,10 @@ final case class ActiveContractsData private ( contracts: Iterable[ActiveContractData], ) { - require( - contracts.forall(tc => tc.transferCounter.isDefined), - s"The reassignment counter must be defined for protocol version '${ProtocolVersion.v30}' or higher.", - ) - def contractIds: Seq[LfContractId] = contracts.map(_.contractId).toSeq - def asMap: Map[LfContractId, (TransferCounterO, TimeOfChange)] = - contracts.view.map(tc => tc.contractId -> (tc.transferCounter, toc)).toMap + def asMap: Map[(LfContractId, TimeOfChange), TransferCounter] = + contracts.view.map(tc => (tc.contractId, toc) -> tc.transferCounter).toMap def asSeq: Seq[ActiveContractData] = contracts.toSeq @@ -37,7 +32,7 @@ object ActiveContractsData { def create( protocolVersion: ProtocolVersion, toc: TimeOfChange, - contracts: Seq[(LfContractId, TransferCounterO)], + contracts: Seq[(LfContractId, TransferCounter)], ): Either[String, ActiveContractsData] = { Either .catchOnly[IllegalArgumentException]( diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala index 5a4cd3f14..d86cf6de7 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStore.scala @@ -3,22 +3,27 @@ package com.digitalasset.canton.participant.store.db -import cats.data.{Chain, EitherT} +import cats.data.{EitherT, NonEmptyChain} import cats.syntax.either.* import cats.syntax.foldable.* +import cats.syntax.functor.* +import cats.syntax.functorFilter.* import cats.syntax.parallel.* +import cats.syntax.traverse.* import com.daml.lf.data.Ref.PackageId import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty -import com.digitalasset.canton.config.CantonRequireTypes.String100 +import com.digitalasset.canton.config.CantonRequireTypes.{LengthLimitedString, String100} import com.digitalasset.canton.config.ProcessingTimeout import com.digitalasset.canton.config.RequireTypes.PositiveNumeric import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.NamedLoggerFactory import 
com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange +import com.digitalasset.canton.participant.store.ActiveContractStore.ActivenessChangeDetail.* import com.digitalasset.canton.participant.store.data.ActiveContractsData import com.digitalasset.canton.participant.store.db.DbActiveContractStore.* import com.digitalasset.canton.participant.store.{ + ActivationsDeactivationsConsistencyCheck, ActiveContractStore, ContractChange, ContractStore, @@ -40,12 +45,11 @@ import com.digitalasset.canton.resource.DbStorage.* import com.digitalasset.canton.resource.{DbStorage, DbStore} import com.digitalasset.canton.store.db.{DbDeserializationException, DbPrunableByTimeDomain} import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore, PrunableByTimeParameters} -import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.{Checked, CheckedT, ErrorUtil, IterableUtil} +import com.digitalasset.canton.util.{Checked, CheckedT, ErrorUtil, IterableUtil, MonadUtil} import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{RequestCounter, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{RequestCounter, TransferCounter} import slick.jdbc.* import slick.jdbc.canton.SQLActionBuilder @@ -71,7 +75,7 @@ class DbActiveContractStore( enableAdditionalConsistencyChecks: Boolean, maxContractIdSqlInListSize: PositiveNumeric[Int], batchingParametersConfig: PrunableByTimeParameters, - indexedStringStore: IndexedStringStore, + val indexedStringStore: IndexedStringStore, protocolVersion: ProtocolVersion, override protected val timeouts: ProcessingTimeout, override protected val loggerFactory: NamedLoggerFactory, @@ -90,6 +94,9 @@ class DbActiveContractStore( protected[this] override val pruning_status_table = "par_active_contract_pruning" + private def checkedTUnit: CheckedT[Future, AcsError, AcsWarning, Unit] = + CheckedT.resultT[Future, AcsError, AcsWarning](()) + /* Consider the scenario where a contract is created on domain D1, then transferred to D2, then to D3 and is finally archived. We will have the corresponding entries in the ActiveContractStore: @@ -98,108 +105,102 @@ class DbActiveContractStore( - On D3, remoteDomain will initially be Some(D2) and then None (after the archival). */ private case class StoredActiveContract( - change: ChangeType, - timestamp: CantonTimestamp, - rc: RequestCounter, - remoteDomainIdIndex: Option[Int], - transferCounter: TransferCounterO, + activenessChange: ActivenessChangeDetail, + toc: TimeOfChange, ) { - def toContractState(implicit ec: ExecutionContext): Future[ContractState] = { - val statusF = change match { - case ChangeType.Activation => - Future.successful(Active(transferCounter)) - - case ChangeType.Deactivation => - // In case of a deactivation, then `remoteDomainIdIndex` is non-empty iff it is a transfer-out, - // in which case the corresponding domain is the target domain. - // The same holds for `remoteDomainIdF`. 
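// Illustrative sketch, not from the Canton sources: the store code in this file keys each
// row on an ActivenessChangeDetail that exposes an operation name, an optional transfer
// counter (transferCounterO) and, for transfers, a remote domain index. That class is
// defined outside this hunk; the simplified stand-in below only shows the shape this code
// assumes, and the string names are illustrative.
object ActivenessChangeDetailSketch {
  sealed trait Detail {
    def name: String
    def transferCounterO: Option[Long]
  }
  final case class Create(counter: Long) extends Detail {
    val name = "create"; val transferCounterO = Some(counter)
  }
  final case class Add(counter: Long) extends Detail {
    val name = "add"; val transferCounterO = Some(counter)
  }
  case object Archive extends Detail {
    val name = "archive"; val transferCounterO: Option[Long] = None
  }
  case object Purge extends Detail {
    val name = "purge"; val transferCounterO: Option[Long] = None
  }
  final case class TransferIn(counter: Long, remoteDomainIdx: Int) extends Detail {
    val name = "transfer-in"; val transferCounterO = Some(counter)
  }
  final case class TransferOut(counter: Long, remoteDomainIdx: Int) extends Detail {
    val name = "transfer-out"; val transferCounterO = Some(counter)
  }
}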
- remoteDomainIdF.map { - case Some(domainId) => TransferredAway(TargetDomainId(domainId), transferCounter) - case None => Archived - } + def toContractState(implicit + ec: ExecutionContext, + traceContext: TraceContext, + ): Future[ContractState] = { + val statusF = activenessChange match { + case Create(transferCounter) => Future.successful(Active(transferCounter)) + case Archive => Future.successful(Archived) + case Add(transferCounter) => Future.successful(Active(transferCounter)) + case Purge => Future.successful(Purged) + case in: TransferIn => Future.successful(Active(in.transferCounter)) + case out: TransferOut => + domainIdFromIdx(out.remoteDomainIdx).map(id => + TransferredAway(TargetDomainId(id), out.transferCounter) + ) } - statusF.map(ContractState(_, rc, timestamp)) - } - private def remoteDomainIdF: Future[Option[DomainId]] = { - remoteDomainIdIndex.fold(Future.successful(None: Option[DomainId])) { index => - import TraceContext.Implicits.Empty.* - IndexedDomain - .fromDbIndexOT("par_active_contracts remote domain index", indexedStringStore)(index) - .map(_.domainId) - .value - } + statusF.map(ContractState(_, toc.rc, toc.timestamp)) } def toTransferCounterAtChangeInfo: TransferCounterAtChangeInfo = - TransferCounterAtChangeInfo(TimeOfChange(rc, timestamp), transferCounter) + TransferCounterAtChangeInfo(toc, activenessChange.transferCounterO) } private implicit val getResultStoredActiveContract: GetResult[StoredActiveContract] = - GetResult(r => - StoredActiveContract( - ChangeType.getResultChangeType(r), - GetResult[CantonTimestamp].apply(r), - GetResult[RequestCounter].apply(r), - r.nextIntOption(), - GetResult[TransferCounterO].apply(r), - ) - ) + GetResult { r => + val activenessChange = GetResult[ActivenessChangeDetail].apply(r) + val ts = GetResult[CantonTimestamp].apply(r) + val rc = GetResult[RequestCounter].apply(r) - def markContractsActive(contracts: Seq[(LfContractId, TransferCounterO)], toc: TimeOfChange)( - implicit traceContext: TraceContext + StoredActiveContract(activenessChange, TimeOfChange(rc, ts)) + } + + override def markContractsCreatedOrAdded( + contracts: Seq[(LfContractId, TransferCounter)], + toc: TimeOfChange, + isCreation: Boolean, + )(implicit + traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = { - { - for { - activeContractsData <- CheckedT.fromEitherT( - EitherT.fromEither[Future]( - ActiveContractsData - .create(protocolVersion, toc, contracts) - .leftMap(errorMessage => ActiveContractsDataInvariantViolation(errorMessage)) - ) - ) - _ <- bulkInsert( - activeContractsData.asMap, - ChangeType.Activation, - remoteDomain = None, + val (operationName, builder) = + if (isCreation) (ActivenessChangeDetail.create, ActivenessChangeDetail.Create(_)) + else (ActivenessChangeDetail.add, ActivenessChangeDetail.Add(_)) + + for { + activeContractsData <- CheckedT.fromEitherT( + EitherT.fromEither[Future]( + ActiveContractsData + .create(protocolVersion, toc, contracts) + .leftMap(errorMessage => ActiveContractsDataInvariantViolation(errorMessage)) ) - _ <- - if (enableAdditionalConsistencyChecks) { - performUnlessClosingCheckedT( - "additional-consistency-check", - Checked.result[AcsError, AcsWarning, Unit]( - logger.debug( - "Could not perform additional consistency check because node is shutting down" - ) - ), - ) { - activeContractsData.asSeq.parTraverse_ { tc => - for { - _ <- checkCreateArchiveAtUnique( - tc.contractId, - activeContractsData.toc, - ChangeType.Activation, - ) - _ <- checkChangesBeforeCreation(tc.contractId, 
activeContractsData.toc) - _ <- checkTocAgainstEarliestArchival(tc.contractId, activeContractsData.toc) - } yield () - } + ) + _ <- bulkInsert( + activeContractsData.asMap.fmap(builder), + change = ChangeType.Activation, + operationName = operationName, + ) + _ <- + if (enableAdditionalConsistencyChecks) { + performUnlessClosingCheckedT( + "additional-consistency-check", + Checked.result[AcsError, AcsWarning, Unit]( + logger.debug( + "Could not perform additional consistency check because node is shutting down" + ) + ), + ) { + activeContractsData.asSeq.parTraverse_ { tc => + checkActivationsDeactivationConsistency( + tc.contractId, + activeContractsData.toc, + ) } - } else { - CheckedT.resultT[Future, AcsError, AcsWarning](()) } - } yield () - } + } else checkedTUnit + } yield () } - def archiveContracts(contracts: Seq[LfContractId], toc: TimeOfChange)(implicit + override def purgeOrArchiveContracts( + contracts: Seq[LfContractId], + toc: TimeOfChange, + isArchival: Boolean, + )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = { + val (operationName, operation) = + if (isArchival) (ActivenessChangeDetail.archive, ActivenessChangeDetail.Archive) + else (ActivenessChangeDetail.purge, ActivenessChangeDetail.Purge) + for { _ <- bulkInsert( - contracts.map(cid => (cid, (None: TransferCounterO, toc))).toMap, - ChangeType.Deactivation, - remoteDomain = None, + contracts.map(cid => ((cid, toc), operation)).toMap, + change = ChangeType.Deactivation, + operationName = operationName, ) _ <- if (enableAdditionalConsistencyChecks) { @@ -213,80 +214,77 @@ class DbActiveContractStore( ) { contracts.parTraverse_ { contractId => for { - _ <- checkCreateArchiveAtUnique(contractId, toc, ChangeType.Deactivation) - _ <- checkChangesAfterArchival(contractId, toc) - _ <- checkTocAgainstLatestCreation(contractId, toc) + _ <- checkActivationsDeactivationConsistency(contractId, toc) } yield () } } - } else { - CheckedT.resultT[Future, AcsError, AcsWarning](()) - } + } else checkedTUnit } yield () } - private def indexedDomains( - contractByDomain: Seq[ - (TransferDomainId, Seq[(LfContractId, (TransferCounterO, TimeOfChange))]) - ] - ): CheckedT[Future, AcsError, AcsWarning, Seq[ - (IndexedDomain, Seq[(LfContractId, (TransferCounterO, TimeOfChange))]) - ]] = - CheckedT.result(contractByDomain.parTraverse { case (domainId, contracts) => - IndexedDomain - .indexed(indexedStringStore)(domainId.unwrap) - .map(_ -> contracts) - }) - - override def transferInContracts( - transferIns: Seq[(LfContractId, SourceDomainId, TransferCounterO, TimeOfChange)] + private def transferContracts( + transfers: Seq[(LfContractId, TransferDomainId, TransferCounter, TimeOfChange)], + builder: (TransferCounter, Int) => TransferChangeDetail, + change: ChangeType, + operationName: LengthLimitedString, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = { - val bySourceDomainIndexed = - transferIns.groupMap { case (_, sourceDomain, _, _) => sourceDomain } { - case (id, _, transferCounter, toc) => (id, (transferCounter, toc)) - }.toSeq + val domains = transfers.map { case (_, domain, _, _) => domain.unwrap }.distinct + + type PreparedTransfer = ((LfContractId, TimeOfChange), TransferChangeDetail) + for { - indexedSourceDomain <- indexedDomains(bySourceDomainIndexed) - _ <- indexedSourceDomain.parTraverse_ { case (sourceDomain, contracts) => - bulkInsert( - contracts.toMap, - ChangeType.Activation, - remoteDomain = Some(sourceDomain), - ) + domainIndices <- 
getDomainIndices(domains) + + preparedTransfersE = MonadUtil.sequentialTraverse( + transfers + ) { case (cid, remoteDomain, transferCounter, toc) => + domainIndices + .get(remoteDomain.unwrap) + .toRight[AcsError](UnableToFindIndex(remoteDomain.unwrap)) + .map(idx => ((cid, toc), builder(transferCounter, idx.index))) } - _ <- checkTransfersConsistency( - transferIns.map { case (transfer, _, counter, toc) => (transfer, counter, toc) }, - OperationType.TransferIn, + + preparedTransfers <- CheckedT.fromChecked(Checked.fromEither(preparedTransfersE)): CheckedT[ + Future, + AcsError, + AcsWarning, + Seq[PreparedTransfer], + ] + + _ <- bulkInsert( + preparedTransfers.toMap, + change, + operationName = operationName, ) + + _ <- checkTransfersConsistency(preparedTransfers) } yield () } + override def transferInContracts( + transferIns: Seq[(LfContractId, SourceDomainId, TransferCounter, TimeOfChange)] + )(implicit + traceContext: TraceContext + ): CheckedT[Future, AcsError, AcsWarning, Unit] = + transferContracts( + transferIns, + TransferIn.apply, + ChangeType.Activation, + ActivenessChangeDetail.transferIn, + ) + override def transferOutContracts( - transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounterO, TimeOfChange)] + transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = { - val byTargetDomains = - transferOuts.groupMap { case (_, targetDomain, _, _) => targetDomain } { - case (id, _, transferCounter, toc) => (id, (transferCounter, toc)) - }.toSeq - for { - byTargetIndexed <- indexedDomains(byTargetDomains) - _ <- byTargetIndexed.parTraverse_ { case (targetDomain, contracts) => - bulkInsert( - contracts.toMap, - ChangeType.Deactivation, - remoteDomain = Some(targetDomain), - ) - } - _ <- checkTransfersConsistency( - transferOuts.map { case (cid, _, counter, toc) => (cid, counter, toc) }, - OperationType.TransferOut, - ) - } yield () - } + ): CheckedT[Future, AcsError, AcsWarning, Unit] = transferContracts( + transferOuts, + TransferOut.apply, + ChangeType.Deactivation, + ActivenessChangeDetail.transferOut, + ) override def fetchStates( contractIds: Iterable[LfContractId] @@ -299,9 +297,7 @@ class DbActiveContractStore( .parTraverseFilter { contractId => storage .querySingle(fetchContractStateQuery(contractId), functionFullName) - .semiflatMap(storedContract => { - storedContract.toContractState.map(res => (contractId -> res)) - }) + .semiflatMap(_.toContractState.map(res => (contractId -> res))) .value } .map(_.toMap) @@ -317,14 +313,14 @@ class DbActiveContractStore( .map { inClause => val query = sql""" - with ordered_changes(contract_id, change, ts, request_counter, remote_domain_id, transfer_counter, row_num) as ( - select contract_id, change, ts, request_counter, remote_domain_id, transfer_counter, + with ordered_changes(contract_id, operation, transfer_counter, remote_domain_idx, ts, request_counter, row_num) as ( + select contract_id, operation, transfer_counter, remote_domain_idx, ts, request_counter, ROW_NUMBER() OVER (partition by domain_id, contract_id order by ts desc, request_counter desc, change asc) from par_active_contracts where domain_id = $domainId and """ ++ inClause ++ sql""" ) - select contract_id, change, ts, request_counter, remote_domain_id, transfer_counter + select contract_id, operation, transfer_counter, remote_domain_idx, ts, request_counter from ordered_changes where row_num = 1; """ @@ -363,8 +359,8 @@ class DbActiveContractStore( val 
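// Illustrative sketch, not from the Canton sources: transferContracts above first resolves
// the distinct remote domains to integer indices (getDomainIndices) and only then builds the
// rows to insert, so that a missing index fails the whole batch before any write happens. A
// self-contained version of that resolve-then-build step with simplified types:
object PrepareTransfersSketch {
  final case class Transfer(contractId: String, remoteDomain: String, transferCounter: Long)

  def prepare(
      transfers: Seq[Transfer],
      indexOf: Map[String, Int], // result of resolving the distinct remote domains up front
  ): Either[String, Seq[(String, Int, Long)]] =
    transfers.foldLeft[Either[String, Vector[(String, Int, Long)]]](Right(Vector.empty)) {
      case (acc, t) =>
        for {
          prepared <- acc
          idx <- indexOf.get(t.remoteDomain).toRight(s"Unable to find index for ${t.remoteDomain}")
        } yield prepared :+ ((t.contractId, idx, t.transferCounter))
    }
}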
query = (sql""" - with ordered_changes(contract_id, package_id, change, ts, request_counter, remote_domain_id, row_num) as ( - select par_active_contracts.contract_id, par_contracts.package_id, change, ts, par_active_contracts.request_counter, remote_domain_id, + with ordered_changes(contract_id, package_id, change, ts, request_counter, remote_domain_idx, row_num) as ( + select par_active_contracts.contract_id, par_contracts.package_id, change, ts, par_active_contracts.request_counter, remote_domain_idx, ROW_NUMBER() OVER ( partition by par_active_contracts.domain_id, par_active_contracts.contract_id order by @@ -391,7 +387,7 @@ class DbActiveContractStore( override def snapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]] = { + ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]] = { logger.debug(s"Obtaining ACS snapshot at $timestamp") storage .query( @@ -407,7 +403,7 @@ class DbActiveContractStore( override def snapshot(rc: RequestCounter)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounterO)]] = { + ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounter)]] = { logger.debug(s"Obtaining ACS snapshot at $rc") storage .query( @@ -441,7 +437,7 @@ class DbActiveContractStore( requestCounter: RequestCounter, )(implicit traceContext: TraceContext - ): Future[Map[LfContractId, TransferCounterO]] = { + ): Future[Map[LfContractId, TransferCounter]] = { logger.debug( s"Looking up transfer counters for contracts $contractIds up to but not including $requestCounter" ) @@ -467,8 +463,15 @@ class DbActiveContractStore( } } yield { contractIds - .map(k => (k, acsArchivalContracts.get(k).flatten)) - .toMap + .diff(acsArchivalContracts.keySet) + .foreach(cid => + ErrorUtil.internalError( + new IllegalStateException( + s"Archived non-transient contract $cid should have been active in the ACS and have a transfer counter defined" + ) + ) + ) + acsArchivalContracts } } } @@ -476,7 +479,7 @@ class DbActiveContractStore( private[this] def snapshotQuery[T]( p: SnapshotQueryParameter[T], contractIds: Option[Set[LfContractId]], - ): DbAction.ReadOnly[Seq[(LfContractId, T, TransferCounterO)]] = { + ): DbAction.ReadOnly[Seq[(LfContractId, T, TransferCounter)]] = { import DbStorage.Implicits.BuilderChain.* val idsO = contractIds.map { ids => @@ -501,7 +504,7 @@ class DbActiveContractStore( or (AC.ts = AC2.ts and AC.request_counter = AC2.request_counter and AC2.change = ${ChangeType.Deactivation}))) and AC.#${p.attribute} <= ${p.bound} and domain_id = $domainId""" ++ idsO.fold(sql"")(ids => sql" and AC.contract_id in " ++ ids) ++ ordering) - .as[(LfContractId, T, TransferCounterO)] + .as[(LfContractId, T, TransferCounter)] case _: DbStorage.Profile.Postgres => (sql""" select distinct(contract_id), AC3.#${p.attribute}, AC3.transfer_counter from par_active_contracts AC1 @@ -511,7 +514,7 @@ class DbActiveContractStore( .limit(1)}) as AC3 on true where AC1.domain_id = $domainId and AC3.change = CAST(${ChangeType.Activation} as change_type)""" ++ idsO.fold(sql"")(ids => sql" and AC1.contract_id in " ++ ids) ++ ordering) - .as[(LfContractId, T, TransferCounterO)] + .as[(LfContractId, T, TransferCounter)] case _: DbStorage.Profile.Oracle => (sql"""select distinct(contract_id), AC3.#${p.attribute}, AC3.transfer_counter from par_active_contracts AC1, lateral (select #${p.attribute}, change, transfer_counter from par_active_contracts AC2 where domain_id 
= $domainId @@ -520,7 +523,7 @@ class DbActiveContractStore( fetch first 1 row only) AC3 where AC1.domain_id = $domainId and AC3.change = 'activation'""" ++ idsO.fold(sql"")(ids => sql" and AC1.contract_id in " ++ ids) ++ ordering) - .as[(LfContractId, T, TransferCounterO)] + .as[(LfContractId, T, TransferCounter)] } } @@ -629,6 +632,57 @@ class DbActiveContractStore( } yield nrPruned).onShutdown(0) } + /* Computes the maximum transfer counter for each contract in the `res` vector. + The computation for max_transferCounter(`rc`, `cid`) reuses the result of max_transferCounter(`rc-1`, `cid`). + + Assumption: the input `res` is already sorted by request counter. + */ + /* + TODO(i12904): Here we compute the maximum of the previous transfer counters; + instead, we could retrieve the transfer counter of the latest activation + */ + private def transferCounterForArchivals( + res: Iterable[(TimeOfChange, LfContractId, ActivenessChangeDetail)] + ): Map[(RequestCounter, LfContractId), Option[TransferCounter]] = { + res + .groupBy { case (_, cid, _) => cid } + .flatMap { case (cid, changes) => + val sortedChangesByRc = changes.collect { + case (TimeOfChange(rc, _), _, change) + if change.name != ActivenessChangeDetail.transferOut => + ((rc, cid), change) + }.toList + + NonEmpty.from(sortedChangesByRc) match { + case None => List.empty + case Some(changes) => + val ((rc, cid), op) = changes.head1 + val initial = ((rc, cid), (op.transferCounterO, op)) + + changes.tail1.scanLeft(initial) { + case ( + ((_, _), (accTransferCounter, _)), + ((crtRc, cid), change), + ) => + ( + (crtRc, cid), + ( + Ordering[Option[TransferCounter]].max( + accTransferCounter, + change.transferCounterO, + ), + change, + ), + ) + } + } + } + .collect { + case ((rc, cid), (transferCounter, Archive)) => ((rc, cid), transferCounter) + case ((rc, cid), (transferCounter, Purge)) => ((rc, cid), transferCounter) + } + } + def deleteSince(criterion: RequestCounter)(implicit traceContext: TraceContext): Future[Unit] = { val query = sqlu"delete from par_active_contracts where domain_id = $domainId and request_counter >= $criterion" @@ -649,78 +703,13 @@ class DbActiveContractStore( case _: DbStorage.Profile.Oracle => "asc" case _ => "desc" } - sql"""select ts, request_counter, contract_id, change, transfer_counter, operation + sql"""select ts, request_counter, contract_id, operation, transfer_counter, remote_domain_idx from par_active_contracts where domain_id = $domainId and ((ts = ${fromExclusive.timestamp} and request_counter > ${fromExclusive.rc}) or ts > ${fromExclusive.timestamp}) and ((ts = ${toInclusive.timestamp} and request_counter <= ${toInclusive.rc}) or ts <= ${toInclusive.timestamp}) order by ts asc, request_counter asc, change #$changeOrder""" - }.as[ - ( - CantonTimestamp, - RequestCounter, - LfContractId, - ChangeType, - TransferCounterO, - OperationType, - ) - ] - - /* Computes the maximum transfer counter for each contract in the `res` vector, up to a certain request counter `rc`. - The computation for max_transferCounter(`rc`, `cid`) reuses the result of max_transferCounter(`rc-1`, `cid`). - - Assumption: the input `res` is already sorted by request counter. 
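// Illustrative sketch, not from the Canton sources: the comment above describes how
// transferCounterForArchivals reuses the running maximum per contract instead of recomputing
// it for every request counter. A self-contained version of that running-maximum idea for a
// single contract, with plain Long counters and a simplified change type (the real code also
// skips transfer-outs from the scan and keeps purges as well as archivals):
object TransferCounterForArchivalsSketch {
  sealed trait Change { def counterO: Option[Long] }
  final case class Activation(counterO: Option[Long]) extends Change
  case object Archival extends Change { val counterO: Option[Long] = None }

  /** `sortedChanges` must be ordered by request counter; returns the maximum counter known at
    * each archival's request counter.
    */
  def countersAtArchivals(sortedChanges: List[(Long, Change)]): Map[Long, Option[Long]] =
    sortedChanges
      .scanLeft((Option.empty[Long], Option.empty[(Long, Change)])) {
        case ((runningMax, _), (rc, change)) =>
          (Ordering[Option[Long]].max(runningMax, change.counterO), Some((rc, change)))
      }
      .collect { case (runningMax, Some((rc, Archival))) => rc -> runningMax }
      .toMap
}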
- */ - /* - TODO(i12904): Here we compute the maximum of the previous transfer counters; - instead, we could retrieve the transfer counter of the latest activation - */ - - def transferCounterForArchivals( - res: Iterable[ - ( - CantonTimestamp, - RequestCounter, - LfContractId, - ChangeType, - TransferCounterO, - OperationType, - ) - ] - ): Map[(RequestCounter, LfContractId), TransferCounterO] = { - val groupedByCid = res.groupBy { case (_, _, cid, _, _, _) => cid } - val maxTransferCountersPerCidUpToRc = groupedByCid - .flatMap { case (cid, changes) => - val sortedChangesByRc = changes.collect { - case (_, rc, _, _, transferCounter, opType) if opType != OperationType.TransferOut => - ((rc, cid), (transferCounter, opType)) - }.toList - - NonEmpty.from(sortedChangesByRc) match { - case None => List.empty - case Some(changes) => - changes.tail1.scanLeft(changes.head1) { - case ( - ((_, _), (accTransferCounter, _)), - ((crtRc, cid), (crtTransferCounter, opType)), - ) => - ( - (crtRc, cid), - ( - Ordering[TransferCounterO].max(accTransferCounter, crtTransferCounter), - opType, - ), - ) - } - } - } - .collect { - case ((rc, cid), (transferCounter, opType)) if opType == OperationType.Archive => - ((rc, cid), transferCounter) - } - - maxTransferCountersPerCidUpToRc - } + }.as[(TimeOfChange, LfContractId, ActivenessChangeDetail)] for { retrievedChangesBetween <- storage.query( @@ -738,14 +727,14 @@ class DbActiveContractStore( */ // retrieves the transfer counters for archived contracts that were activated at time <= `fromExclusive` maxTransferCountersPerRemainingCidUpToRc <- { - val archivalsWithoutTransferCounters = - maxTransferCountersPerCidUpToRc.filter(_._2.isEmpty) + val archivalsWithoutTransferCounters = maxTransferCountersPerCidUpToRc.filter(_._2.isEmpty) + NonEmpty .from(archivalsWithoutTransferCounters.map { case ((_, contractId), _) => contractId }.toSeq) .fold( - Future.successful(Map.empty[(RequestCounter, LfContractId), TransferCounterO]) + Future.successful(Map.empty[(RequestCounter, LfContractId), Option[TransferCounter]]) ) { cids => val maximumRc = archivalsWithoutTransferCounters @@ -762,21 +751,12 @@ class DbActiveContractStore( // function `transferCounterForArchivals` and obtain the transfer counters for (rc, cid) pairs. // One could have a more restrictive query and compute the transfer counters in some other way. 
.map { inClause => - (sql"""select ts, request_counter, contract_id, change, transfer_counter, operation + (sql"""select ts, request_counter, contract_id, operation, transfer_counter, remote_domain_idx from par_active_contracts where domain_id = $domainId and (request_counter <= $maximumRc) and (ts <= ${toInclusive.timestamp}) and """ ++ inClause ++ sql""" order by ts asc, request_counter asc""") - .as[ - ( - CantonTimestamp, - RequestCounter, - LfContractId, - ChangeType, - TransferCounterO, - OperationType, - ) - ] + .as[(TimeOfChange, LfContractId, ActivenessChangeDetail)] } val resultArchivalTransferCounters = storage .sequentialQueryAndCombine( @@ -784,49 +764,77 @@ class DbActiveContractStore( "ACS: get data to compute the transfer counters for archived contracts", ) - resultArchivalTransferCounters.map { r => - transferCounterForArchivals(r) - } + resultArchivalTransferCounters.map(transferCounterForArchivals) } } - } yield { - // filter None entries from maxTransferCountersPerCidUpToRc, as the transfer counters for - // those contracts are now in remainingMaxTransferCountersPerCidUpToRc - val definedMaxTransferCountersPerCidUpToRc = maxTransferCountersPerCidUpToRc.filter { - case ((_, _), transferCounter) => transferCounter.isDefined - } - val groupedByTs = - IterableUtil.spansBy(retrievedChangesBetween)(entry => (entry._1, entry._2)) - groupedByTs.map { case ((ts, rc), changes) => - val (acts, deacts) = changes.partition { case (_, _, _, changeType, _, _) => - changeType == ChangeType.Activation - } - TimeOfChange(rc, ts) -> ActiveContractIdsChange( - acts.map { case (_, _, cid, _, transferCounter, opType) => - if (opType == OperationType.Create) - (cid, StateChangeType(ContractChange.Created, transferCounter)) - else - (cid, StateChangeType(ContractChange.Assigned, transferCounter)) - }.toMap, - deacts.map { case (_, requestCounter, cid, _, transferCounter, opType) => - if (opType == OperationType.Archive) { - ( - cid, - StateChangeType( - ContractChange.Archived, - definedMaxTransferCountersPerCidUpToRc.getOrElse( - (requestCounter, cid), - maxTransferCountersPerRemainingCidUpToRc - .getOrElse((requestCounter, cid), transferCounter), - ), - ), - ) - } else (cid, StateChangeType(ContractChange.Unassigned, transferCounter)) - }.toMap, - ) - } + res <- combineTransferCounters( + maxTransferCountersPerRemainingCidUpToRc = maxTransferCountersPerRemainingCidUpToRc, + maxTransferCountersPerCidUpToRc = maxTransferCountersPerCidUpToRc, + retrievedChangesBetween = retrievedChangesBetween, + ) + + } yield res + } + + private def combineTransferCounters( + maxTransferCountersPerRemainingCidUpToRc: Map[ + (RequestCounter, LfContractId), + Option[TransferCounter], + ], + maxTransferCountersPerCidUpToRc: Map[(RequestCounter, LfContractId), Option[TransferCounter]], + retrievedChangesBetween: Seq[(TimeOfChange, LfContractId, ActivenessChangeDetail)], + ): Future[LazyList[(TimeOfChange, ActiveContractIdsChange)]] = { + // filter None entries from maxTransferCountersPerCidUpToRc, as the transfer counters for + // those contracts are now in remainingMaxTransferCountersPerCidUpToRc + val definedMaxTransferCountersPerCidUpToRc = maxTransferCountersPerCidUpToRc.collect { + case (key, Some(transferCounter)) => (key, transferCounter) } + + type AccType = (LfContractId, StateChangeType) + val empty = Vector.empty[AccType] + + IterableUtil + .spansBy(retrievedChangesBetween) { case (toc, _, _) => toc } + .traverse { case (toc, changes) => + val resE = changes.forgetNE + .foldLeftM[Either[String, *], 
(Vector[AccType], Vector[AccType])]((empty, empty)) { + case ((acts, deacts), (_, cid, change)) => + change match { + case create: Create => + Right((acts :+ (cid, create.toStateChangeType), deacts)) + + case in: TransferIn => + Right((acts :+ (cid, in.toStateChangeType), deacts)) + case out: TransferOut => + Right((acts, deacts :+ (cid, out.toStateChangeType))) + + case add: Add => + Right((acts :+ (cid, add.toStateChangeType), deacts)) + + case Archive | Purge => + val transferCounterE = definedMaxTransferCountersPerCidUpToRc + .get((toc.rc, cid)) + .orElse( + maxTransferCountersPerRemainingCidUpToRc.get((toc.rc, cid)).flatten + ) + .toRight(s"Unable to find transfer counter for $cid at $toc") + + transferCounterE.map { transferCounter => + val newChange = ( + cid, + StateChangeType(ContractChange.Archived, transferCounter), + ) + + (acts, deacts :+ newChange) + } + } + } + + resE.map { case (acts, deacts) => toc -> ActiveContractIdsChange(acts.toMap, deacts.toMap) } + } + .bimap(err => Future.failed(new IllegalStateException(err)), Future.successful) + .merge } override private[participant] def contractCount( @@ -841,145 +849,56 @@ class DbActiveContractStore( } private def checkTransfersConsistency( - transfers: Seq[(LfContractId, TransferCounterO, TimeOfChange)], - operation: TransferOperationType, + transfers: Seq[((LfContractId, TimeOfChange), TransferChangeDetail)] )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] = if (enableAdditionalConsistencyChecks) { - transfers.parTraverse_ { case (contractId, transferCounter, toc) => + transfers.parTraverse_ { case ((contractId, toc), transfer) => for { - _ <- checkTocAgainstLatestCreation(contractId, toc) - _ <- transferCounter - .traverse_(tc => checkTransferCountersShouldIncrease(contractId, toc, tc, operation)) - _ <- checkTocAgainstEarliestArchival(contractId, toc) + _ <- checkTransferCountersShouldIncrease(contractId, toc, transfer) + _ <- checkActivationsDeactivationConsistency(contractId, toc) } yield () } } else CheckedT.pure(()) - private def checkChangesBeforeCreation(contractId: LfContractId, toc: TimeOfChange)(implicit - traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = { - val query = - storage.profile match { - case _: DbStorage.Profile.Oracle => - sql"""select ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId - and (ts < ${toc.timestamp} or (ts = ${toc.timestamp} and request_counter < ${toc.rc})) and operation != ${OperationType.Create} - order by ts desc, request_counter desc, change asc""" - case _ => - sql"""select ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId - and (ts, request_counter) < (${toc.timestamp}, ${toc.rc}) and operation != CAST(${OperationType.Create} as operation_type) - order by (ts, request_counter, change) desc""" - } - - val result = storage.query(query.as[(CantonTimestamp, RequestCounter)], functionFullName) - - CheckedT(result.map { changes => - val warnings = changes.map { case (changeTs, changeRc) => - ChangeBeforeCreation(contractId, toc, TimeOfChange(changeRc, changeTs)) - } - Checked.unit.appendNonaborts(Chain.fromSeq(warnings)) - }) - } - - private def checkChangesAfterArchival(contractId: LfContractId, toc: TimeOfChange)(implicit - traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = { - - val q = - storage.profile match { - case _: DbStorage.Profile.Oracle => - sql"""select ts, 
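// Illustrative sketch, not from the Canton sources: combineTransferCounters above folds the
// changes of a single TimeOfChange into separate activation and deactivation maps, and fails
// if an archival (or purge) has no transfer counter available from the precomputed maps. A
// simplified, self-contained version of that fold:
object CombineChangesSketch {
  sealed trait Change
  final case class Activated(contractId: String, transferCounter: Long) extends Change
  final case class Deactivated(contractId: String, transferCounter: Long) extends Change
  final case class ArchivedWithoutCounter(contractId: String) extends Change

  def split(
      changes: List[Change],
      countersForArchivals: Map[String, Long], // precomputed running maxima per contract
  ): Either[String, (Map[String, Long], Map[String, Long])] =
    changes.foldLeft[Either[String, (Map[String, Long], Map[String, Long])]](
      Right((Map.empty, Map.empty))
    ) { (acc, change) =>
      acc.flatMap { case (acts, deacts) =>
        change match {
          case Activated(cid, tc)   => Right((acts + (cid -> tc), deacts))
          case Deactivated(cid, tc) => Right((acts, deacts + (cid -> tc)))
          case ArchivedWithoutCounter(cid) =>
            countersForArchivals
              .get(cid)
              .toRight(s"Unable to find transfer counter for $cid")
              .map(tc => (acts, deacts + (cid -> tc)))
        }
      }
    }
}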
request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId - and (ts > ${toc.timestamp} or (ts = ${toc.timestamp} and request_counter > ${toc.rc})) and operation != ${OperationType.Archive} - order by ts desc, request_counter desc, change asc""" - - case _ => - sql"""select ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId - and (ts, request_counter) > (${toc.timestamp}, ${toc.rc}) and operation != CAST(${OperationType.Archive} as operation_type) - order by (ts, request_counter, change) desc""" - } - - val result = storage.query(q.as[(CantonTimestamp, RequestCounter)], functionFullName) - - CheckedT(result.map { changes => - val warnings = changes.map { case (changeTs, changeRc) => - ChangeAfterArchival(contractId, toc, TimeOfChange(changeRc, changeTs)) - } - Checked.unit.appendNonaborts(Chain.fromSeq(warnings)) - }) - } - - private def checkCreateArchiveAtUnique( + private def checkActivationsDeactivationConsistency( contractId: LfContractId, toc: TimeOfChange, - change: ChangeType, )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] = { - val operation = change match { - case ChangeType.Activation => OperationType.Create - case ChangeType.Deactivation => OperationType.Archive - } - val order = change match { - case ChangeType.Activation => "desc" // find the latest creation - case ChangeType.Deactivation => "asc" // find the earliest archival + + val query = storage.profile match { + case _: DbStorage.Profile.Oracle => + throw new IllegalArgumentException("Implement for oracle") + case _ => + // change desc allows to have activations first + sql"""select operation, transfer_counter, remote_domain_idx, ts, request_counter from par_active_contracts + where domain_id = $domainId and contract_id = $contractId + order by ts asc, request_counter asc, change desc""" } - val q = - storage.profile match { - case _: DbStorage.Profile.Oracle => - sql""" - select ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId - and (ts <> ${toc.timestamp} or request_counter <> ${toc.rc}) - and change = $change - and operation = $operation - order by ts #$order, request_counter #$order - #${storage.limit(1)} - """ - case _ => - sql""" - select ts, request_counter from par_active_contracts - where domain_id = $domainId and contract_id = $contractId - and (ts, request_counter) <> (${toc.timestamp}, ${toc.rc}) - and change = CAST($change as change_type) - and operation = CAST($operation as operation_type) - order by (ts, request_counter) #$order - #${storage.limit(1)} - """ + val changesF: Future[Vector[StoredActiveContract]] = + storage.query(query.as[StoredActiveContract], functionFullName) - } - val query = q.as[(CantonTimestamp, RequestCounter)] - CheckedT(storage.query(query, functionFullName).map { changes => - changes.headOption.fold(Checked.unit[AcsError, AcsWarning]) { case (changeTs, changeRc) => - val warn = - if (change == ChangeType.Activation) - DoubleContractCreation(contractId, TimeOfChange(changeRc, changeTs), toc) - else DoubleContractArchival(contractId, TimeOfChange(changeRc, changeTs), toc) - Checked.continue(warn) + val checkedUnit = Checked.unit[AcsError, AcsWarning] + + CheckedT(changesF.map { changes => + NonEmpty.from(changes).fold(checkedUnit) { changes => + NonEmptyChain + .fromSeq( + ActivationsDeactivationsConsistencyCheck( + contractId, + toc, + changes.map(c => (c.toc, c.activenessChange)), + ) + ) + 
.fold(checkedUnit)(Checked.continues) } }) } - /** Check that the given [[com.digitalasset.canton.participant.util.TimeOfChange]] - * is not before the latest creation. Otherwise return a [[ChangeBeforeCreation]]. - */ - private def checkTocAgainstLatestCreation(contractId: LfContractId, toc: TimeOfChange)(implicit - traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = - CheckedT(storage.query(fetchLatestCreation(contractId), functionFullName).map { - case None => Checked.unit - case Some(StoredActiveContract(_, ts, rc, _, _)) => - val storedToc = TimeOfChange(rc, ts) - if (storedToc > toc) Checked.continue(ChangeBeforeCreation(contractId, storedToc, toc)) - else Checked.unit - }) - private def checkTransferCountersShouldIncrease( contractId: LfContractId, toc: TimeOfChange, - transferCounter: TransferCounter, - transferType: TransferOperationType, + transfer: TransferChangeDetail, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT { @@ -1006,47 +925,25 @@ class DbActiveContractStore( _ <- ActiveContractStore.checkTransferCounterAgainstLatestBefore( contractId, toc, - transferCounter, + transfer.transferCounter, latestBeforeO.map(_.toTransferCounterAtChangeInfo), - transferType.toTransferType, ) _ <- ActiveContractStore.checkTransferCounterAgainstEarliestAfter( contractId, toc, - transferCounter, + transfer.transferCounter, earliestAfterO.map(_.toTransferCounterAtChangeInfo), - transferType.toTransferType, + transfer.toTransferType, ) } yield () } } - /** Check that the given [[com.digitalasset.canton.participant.util.TimeOfChange]] - * is not after the earliest archival. Otherwise return a [[ChangeAfterArchival]]. - */ - private def checkTocAgainstEarliestArchival(contractId: LfContractId, toc: TimeOfChange)(implicit - traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = - CheckedT(storage.query(fetchEarliestArchival(contractId), functionFullName).map { - case None => Checked.unit - case Some(StoredActiveContract(_, ts, rc, _, _)) => - val storedToc = TimeOfChange(rc, ts) - if (storedToc < toc) Checked.continue(ChangeAfterArchival(contractId, storedToc, toc)) - else Checked.unit - }) - private def bulkInsert( - contractIdsWithTransferCounter: Map[LfContractId, (TransferCounterO, TimeOfChange)], + contractChanges: Map[(LfContractId, TimeOfChange), ActivenessChangeDetail], change: ChangeType, - remoteDomain: Option[IndexedDomain], + operationName: LengthLimitedString, )(implicit traceContext: TraceContext): CheckedT[Future, AcsError, AcsWarning, Unit] = { - val operation = change match { - case ChangeType.Activation => - if (remoteDomain.isEmpty) OperationType.Create else OperationType.TransferIn - case ChangeType.Deactivation => - if (remoteDomain.isEmpty) OperationType.Archive else OperationType.TransferOut - } - val insertQuery = storage.profile match { case _: DbStorage.Profile.Oracle => """merge /*+ INDEX ( par_active_contracts ( contract_id, ts, request_counter, change, domain_id, transfer_counter ) ) */ @@ -1056,25 +953,22 @@ class DbActiveContractStore( | par_active_contracts.request_counter = input.request_counter and par_active_contracts.change = input.change and | par_active_contracts.domain_id = input.domain_id) |when not matched then - | insert (contract_id, ts, request_counter, change, domain_id, operation, remote_domain_id, transfer_counter) + | insert (contract_id, ts, request_counter, change, domain_id, operation, transfer_counter, remote_domain_idx) | values 
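// Illustrative sketch, not from the Canton sources: checkActivationsDeactivationConsistency
// above loads every change of a contract ordered by time and hands the whole sequence to
// ActivationsDeactivationsConsistencyCheck, replacing the removed per-case checks (double
// creation/archival, change before creation, change after archival). The actual check is
// defined outside this hunk; the alternation rule below is only a plausible simplification.
object AlternationCheckSketch {
  sealed trait Kind
  case object Activation extends Kind
  case object Deactivation extends Kind

  /** Warns about two consecutive changes of the same kind for one contract. */
  def warnings(changesInOrder: Seq[(Long, Kind)]): Seq[String] =
    changesInOrder.sliding(2).collect {
      case Seq((rc1, kind1), (rc2, kind2)) if kind1 == kind2 =>
        s"two consecutive $kind1 changes at request counters $rc1 and $rc2"
    }.toSeq
}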
(input.contract_id, input.ts, input.request_counter, input.change, input.domain_id, ?, ?, ?)""".stripMargin case _: DbStorage.Profile.H2 | _: DbStorage.Profile.Postgres => - """insert into par_active_contracts(contract_id, ts, request_counter, change, domain_id, operation, remote_domain_id, transfer_counter) + """insert into par_active_contracts(contract_id, ts, request_counter, change, domain_id, operation, transfer_counter, remote_domain_idx) values (?, ?, ?, CAST(? as change_type), ?, CAST(? as operation_type), ?, ?) on conflict do nothing""" } val insertAll = - DbStorage.bulkOperation_(insertQuery, contractIdsWithTransferCounter, storage.profile) { - pp => contractIdWithTransferCounter => - val (contractId, (transferCounter, toc)) = contractIdWithTransferCounter - pp >> contractId - pp >> toc.timestamp - pp >> toc.rc - pp >> change - pp >> domainId - pp >> operation - pp >> remoteDomain - pp >> transferCounter + DbStorage.bulkOperation_(insertQuery, contractChanges, storage.profile) { pp => element => + val ((contractId, toc), operationType) = element + pp >> contractId + pp >> toc.timestamp + pp >> toc.rc + pp >> operationType.changeType + pp >> domainId + pp >> operationType } @SuppressWarnings(Array("org.wartremover.warts.OptionPartial")) @@ -1116,73 +1010,61 @@ class DbActiveContractStore( tssInClause: SQLActionBuilderChain, ) = storage.profile match { case _: DbStorage.Profile.Oracle => - sql"select contract_id, remote_domain_id, transfer_counter, request_counter, ts from par_active_contracts where domain_id = $domainId and " ++ cidsInClause ++ - sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = $change and (operation <> $operation or " ++ - (if (remoteDomain.isEmpty) sql"remote_domain_id is not null" - else sql"remote_domain_id <> $remoteDomain") ++ sql")" + sql"select contract_id, operation, transfer_counter, remote_domain_idx, ts, request_counter from par_active_contracts where domain_id = $domainId and " ++ cidsInClause ++ + sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = $change" case _ => - sql"select contract_id, remote_domain_id, transfer_counter, request_counter, ts from par_active_contracts where domain_id = $domainId and " ++ cidsInClause ++ - sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = CAST($change as change_type) and (operation <> CAST($operation as operation_type) or " ++ - (if (remoteDomain.isEmpty) sql"remote_domain_id is not null" - else sql"remote_domain_id <> $remoteDomain") ++ sql")" + sql"select contract_id, operation, transfer_counter, remote_domain_idx, ts, request_counter from par_active_contracts where domain_id = $domainId and " ++ cidsInClause ++ + sql" and " ++ tssInClause ++ sql" and " ++ rcsInClause ++ sql" and change = CAST($change as change_type)" } val queries = contractIdsNotInsertedInClauses.zip(rcsInClauses).zip(tssInClauses).map { case ((cidsInClause, rcsInClause), tssInClause) => query(cidsInClause, rcsInClause, tssInClause) - .as[(LfContractId, Option[Int], TransferCounterO, RequestCounter, CantonTimestamp)] + .as[(LfContractId, ActivenessChangeDetail, TimeOfChange)] } - val results = storage + + val isActivation = change == ChangeType.Activation + + val warningsF = storage .sequentialQueryAndCombine(queries, functionFullName) - .flatMap(_.toList.parTraverseFilter { - case (cid, Some(remoteIdx), transferCounter, rc, ts) => - IndexedDomain - .fromDbIndexOT("active_contracts", indexedStringStore)(remoteIdx) - .map { indexed => - (cid, 
TransferDetails(indexed.item, transferCounter), TimeOfChange(rc, ts)) - } - .value - case (cid, None, transferCounter, rc, ts) => - Future.successful( - Some((cid, CreationArchivalDetail(transferCounter), TimeOfChange(rc, ts))) - ) - }) + .map(_.toList.mapFilter { case (cid, previousOperationType, toc) => + val newOperationType = contractChanges.getOrElse((cid, toc), previousOperationType) - CheckedT(results.map { presentWithOtherValues => - val isActivation = change == ChangeType.Activation - presentWithOtherValues.traverse_ { case (contractId, previousDetail, toc) => - val transferCounter = contractIdsWithTransferCounter.get(contractId).flatMap(_._1) - val detail = ActivenessChangeDetail(remoteDomain.map(_.item), transferCounter) - val warn = + if (newOperationType == previousOperationType) + None + else { if (isActivation) - SimultaneousActivation( - contractId, - toc, - previousDetail, - detail, + Some( + SimultaneousActivation( + cid, + toc, + previousOperationType, + newOperationType, + ) ) else - SimultaneousDeactivation( - contractId, - toc, - previousDetail, - detail, + Some( + SimultaneousDeactivation( + cid, + toc, + previousOperationType, + newOperationType, + ) ) - Checked.continue(warn) - } - }) + } + }) + + CheckedT(warningsF.map(_.traverse_(Checked.continue))) } CheckedT.result(storage.queryAndUpdate(insertAll, functionFullName)).flatMap { (_: Unit) => if (enableAdditionalConsistencyChecks) { // Check all contracts whether they have been inserted or are already there - // We don't analyze the update counts + // We don't analyze the update count // so that we can use the fast IGNORE_ROW_ON_DUPKEY_INDEX directive in Oracle NonEmpty - .from(contractIdsWithTransferCounter.view.map { case (cid, (_, toc)) => - (cid, toc) - }.toSeq) + .from(contractChanges.keySet.toSeq) .map(checkIdempotence) .getOrElse(CheckedT.pure(())) } else CheckedT.pure(()) @@ -1192,12 +1074,12 @@ class DbActiveContractStore( private def fetchLatestCreation( contractId: LfContractId ): DbAction.ReadOnly[Option[StoredActiveContract]] = - fetchContractStateQuery(contractId, Some(OperationType.Create)) + fetchContractStateQuery(contractId, Some(ActivenessChangeDetail.create)) private def fetchEarliestArchival( contractId: LfContractId ): DbAction.ReadOnly[Option[StoredActiveContract]] = - fetchContractStateQuery(contractId, Some(OperationType.Archive), descending = false) + fetchContractStateQuery(contractId, Some(ActivenessChangeDetail.archive), descending = false) private def fetchEarliestContractStateAfter( contractId: LfContractId, @@ -1218,7 +1100,7 @@ class DbActiveContractStore( private def fetchContractStateQuery( contractId: LfContractId, - operationFilter: Option[OperationType] = None, + operationFilter: Option[LengthLimitedString] = None, tocFilter: Option[TimeOfChange] = None, descending: Boolean = true, ): DbAction.ReadOnly[Option[StoredActiveContract]] = { @@ -1226,7 +1108,7 @@ class DbActiveContractStore( import DbStorage.Implicits.BuilderChain.* val baseQuery = - sql"""select change, ts, request_counter, remote_domain_id, transfer_counter from par_active_contracts + sql"""select operation, transfer_counter, remote_domain_idx, ts, request_counter from par_active_contracts where domain_id = $domainId and contract_id = $contractId""" val opFilterQuery = storage.profile match { @@ -1307,54 +1189,4 @@ private object DbActiveContractStore { } ) } - - sealed trait OperationType extends Product with Serializable { - val name: String - - // lazy val so that `kind` is initialized first in the subclasses - 
final lazy val toDbPrimitive: String100 = - // The Oracle DB schema allows up to 100 chars; Postgres, H2 map this to an enum - String100.tryCreate(name) - } - - sealed trait TransferOperationType extends OperationType { - def toTransferType: ActiveContractStore.TransferType - } - - object OperationType { - case object Create extends OperationType { - override val name = "create" - } - - case object Archive extends OperationType { - override val name = "archive" - } - - case object TransferIn extends TransferOperationType { - override val name = "transfer-in" - - override def toTransferType: ActiveContractStore.TransferType = - ActiveContractStore.TransferType.TransferIn - } - - case object TransferOut extends TransferOperationType { - override val name = "transfer-out" - - override def toTransferType: ActiveContractStore.TransferType = - ActiveContractStore.TransferType.TransferOut - } - - implicit val setParameterOperationType: SetParameter[OperationType] = (v, pp) => - pp >> v.toDbPrimitive - implicit val getResultChangeType: GetResult[OperationType] = GetResult(r => - r.nextString() match { - case OperationType.Create.name => OperationType.Create - case OperationType.Archive.name => OperationType.Archive - case OperationType.TransferIn.name => OperationType.TransferIn - case OperationType.TransferOut.name => OperationType.TransferOut - case unknown => throw new DbDeserializationException(s"Unknown operation type [$unknown]") - } - ) - } - } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala index 0a854f82f..e0164bd24 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/db/DbSyncDomainPersistentState.scala @@ -30,7 +30,7 @@ import com.digitalasset.canton.version.{ProtocolVersion, ReleaseProtocolVersion} import scala.concurrent.ExecutionContext -class DbSyncDomainPersistentStateX( +class DbSyncDomainPersistentState( override val domainId: IndexedDomain, val protocolVersion: ProtocolVersion, clock: Clock, diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala index 7075ad177..27bbb0829 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryAcsCommitmentStore.scala @@ -74,11 +74,10 @@ class InMemoryAcsCommitmentStore(protected val loggerFactory: NamedLoggerFactory ) } else { computed.update(counterParticipant, oldMap + (period -> commitment)) - Future.unit } } } - + Future.unit } override def getComputed(period: CommitmentPeriod, counterParticipant: ParticipantId)(implicit diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala index f91185517..60c89467d 100644 --- 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryActiveContractStore.scala @@ -3,17 +3,29 @@ package com.digitalasset.canton.participant.store.memory -import cats.data.Chain +import cats.data.NonEmptyChain import cats.kernel.Order import cats.syntax.foldable.* import cats.syntax.functor.* import cats.syntax.functorFilter.* +import cats.syntax.parallel.* +import cats.syntax.traverse.* import com.daml.lf.data.Ref.PackageId +import com.daml.nonempty.NonEmpty import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange +import com.digitalasset.canton.participant.store.ActiveContractStore.ActivenessChangeDetail.{ + Add, + Archive, + Create, + Purge, + TransferIn, + TransferOut, +} import com.digitalasset.canton.participant.store.data.{ActiveContractData, ActiveContractsData} import com.digitalasset.canton.participant.store.{ + ActivationsDeactivationsConsistencyCheck, ActiveContractStore, ContractChange, ContractStore, @@ -21,14 +33,20 @@ import com.digitalasset.canton.participant.store.{ } import com.digitalasset.canton.participant.util.{StateChange, TimeOfChange} import com.digitalasset.canton.protocol.ContractIdSyntax.* -import com.digitalasset.canton.protocol.{LfContractId, SourceDomainId, TargetDomainId} +import com.digitalasset.canton.protocol.{ + LfContractId, + SourceDomainId, + TargetDomainId, + TransferDomainId, +} +import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.store.memory.InMemoryPrunableByTime -import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.TraceContext +import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.ShowUtil.* import com.digitalasset.canton.util.* import com.digitalasset.canton.version.ProtocolVersion -import com.digitalasset.canton.{RequestCounter, TransferCounter, TransferCounterO} +import com.digitalasset.canton.{RequestCounter, TransferCounter} import java.util.ConcurrentModificationException import java.util.concurrent.atomic.AtomicInteger @@ -41,11 +59,11 @@ import scala.concurrent.{ExecutionContext, Future} /** Implements an [[ActiveContractStore!]] in memory. 
*/ class InMemoryActiveContractStore( + val indexedStringStore: IndexedStringStore, protocolVersion: ProtocolVersion, override val loggerFactory: NamedLoggerFactory, -)(implicit - val ec: ExecutionContext -) extends ActiveContractStore +)(implicit val ec: ExecutionContext) + extends ActiveContractStore with NamedLogging with InMemoryPrunableByTime { @@ -55,9 +73,10 @@ class InMemoryActiveContractStore( /** Invariant: Never maps to [[ContractStatus.Nonexistent]] */ private[this] val table = TrieMap.empty[LfContractId, ContractStatus] - override def markContractsActive( - contracts: Seq[(LfContractId, TransferCounterO)], + override def markContractsCreatedOrAdded( + contracts: Seq[(LfContractId, TransferCounter)], toc: TimeOfChange, + isCreation: Boolean, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = { @@ -74,40 +93,73 @@ class InMemoryActiveContractStore( activeContractsData.asSeq.to(LazyList).traverse_ { transferableContract => updateTable( transferableContract.contractId, - _.addCreation(transferableContract, activeContractsData.toc), + _.addCreation(transferableContract, activeContractsData.toc, isCreation = isCreation), ) } }) } } - override def archiveContracts(archivals: Seq[LfContractId], toc: TimeOfChange)(implicit + override def purgeOrArchiveContracts( + archivals: Seq[LfContractId], + toc: TimeOfChange, + isArchival: Boolean, + )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT(Future.successful { logger.trace(show"Archiving contracts at $toc: $archivals") - archivals.to(LazyList).traverse_ { case contractId => - updateTable(contractId, _.addArchival(contractId, toc)) + archivals.to(LazyList).traverse_ { contractId => + updateTable(contractId, _.addArchival(contractId, toc, isArchival = isArchival)) } }) override def fetchStates( contractIds: Iterable[LfContractId] - )(implicit traceContext: TraceContext): Future[Map[LfContractId, ContractState]] = - Future.successful { - val snapshot = table.readOnlySnapshot() - contractIds - .to(LazyList) - .mapFilter(contractId => - snapshot.get(contractId).flatMap(_.latestState.map(contractId -> _)) - ) - .toMap + )(implicit traceContext: TraceContext): Future[Map[LfContractId, ContractState]] = { + val snapshot = table.readOnlySnapshot() + + contractIds + .to(LazyList) + .parTraverseFilter(contractId => + snapshot + .get(contractId) + .traverse(status => latestState(status.changes).map(_.map(contractId -> _))) + .map(_.flatten) + ) + .map(_.toMap) + } + + /** Returns the latest [[ActiveContractStore.ContractState]] if any */ + private def latestState( + changes: ChangeJournal + )(implicit traceContext: TraceContext): Future[Option[ContractState]] = { + changes.headOption.traverse { case (change, detail) => + val statusF = detail match { + case ActivenessChangeDetail.Create(transferCounter) => + Future.successful(Active(transferCounter)) + case ActivenessChangeDetail.Archive => Future.successful(Archived) + + case in: ActivenessChangeDetail.TransferIn => Future.successful(Active(in.transferCounter)) + case out: ActivenessChangeDetail.TransferOut => + domainIdFromIdx(out.remoteDomainIdx).map(domainId => + TransferredAway(TargetDomainId(domainId), out.transferCounter) + ) + + case ActivenessChangeDetail.Purge => Future.successful(Purged) + case ActivenessChangeDetail.Add(transferCounter) => + Future.successful(Active(transferCounter)) + + } + + statusF.map(ContractState(_, change.toc)) } + } override def snapshot(timestamp: CantonTimestamp)(implicit 
traceContext: TraceContext - ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]] = Future.successful { - val snapshot = SortedMap.newBuilder[LfContractId, (CantonTimestamp, TransferCounterO)] + ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]] = Future.successful { + val snapshot = SortedMap.newBuilder[LfContractId, (CantonTimestamp, TransferCounter)] table.foreach { case (contractId, contractStatus) => contractStatus.activeBy(timestamp).foreach { case (activationTimestamp, transferCounter) => snapshot += (contractId -> (activationTimestamp, transferCounter)) @@ -118,8 +170,8 @@ class InMemoryActiveContractStore( override def snapshot(rc: RequestCounter)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounterO)]] = Future.successful { - val snapshot = SortedMap.newBuilder[LfContractId, (RequestCounter, TransferCounterO)] + ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounter)]] = Future.successful { + val snapshot = SortedMap.newBuilder[LfContractId, (RequestCounter, TransferCounter)] table.foreach { case (contractId, contractStatus) => contractStatus.activeBy(rc).foreach { case (activationRc, transferCounter) => snapshot += (contractId -> (activationRc, transferCounter)) @@ -147,7 +199,7 @@ class InMemoryActiveContractStore( requestCounter: RequestCounter, )(implicit traceContext: TraceContext - ): Future[Map[LfContractId, TransferCounterO]] = { + ): Future[Map[LfContractId, TransferCounter]] = { logger.debug( s"Looking up transfer counters for contracts $contractIds up to but not including $requestCounter" ) @@ -160,11 +212,17 @@ class InMemoryActiveContractStore( Future.successful { contractIds .to(LazyList) - .mapFilter(contractId => + .map(contractId => table .get(contractId) .flatMap(_.activeBy(requestCounter - 1)) - .map { case (_, transferCounter) => + .fold( + ErrorUtil.internalError( + new IllegalStateException( + s"Archived non-transient contract $contractId should have been active in the ACS" + ) + ) + ) { case (_, transferCounter) => contractId -> transferCounter } ) @@ -172,29 +230,66 @@ class InMemoryActiveContractStore( } } + private def prepareTransfers( + transfers: Seq[(LfContractId, TransferDomainId, TransferCounter, TimeOfChange)] + ): CheckedT[Future, AcsError, AcsWarning, Seq[ + (LfContractId, Int, TransferCounter, TimeOfChange) + ]] = { + val domains = transfers.map { case (_, domain, _, _) => domain.unwrap }.distinct + type PreparedTransfer = (LfContractId, Int, TransferCounter, TimeOfChange) + + for { + domainIndices <- getDomainIndices(domains) + + preparedTransfersE = MonadUtil.sequentialTraverse( + transfers + ) { case (cid, remoteDomain, transferCounter, toc) => + domainIndices + .get(remoteDomain.unwrap) + .toRight[AcsError](UnableToFindIndex(remoteDomain.unwrap)) + .map(domainIdx => (cid, domainIdx.index, transferCounter, toc)) + } + + preparedTransfers <- CheckedT.fromChecked(Checked.fromEither(preparedTransfersE)): CheckedT[ + Future, + AcsError, + AcsWarning, + Seq[PreparedTransfer], + ] + } yield preparedTransfers + } + override def transferInContracts( - transferIns: Seq[(LfContractId, SourceDomainId, TransferCounterO, TimeOfChange)] + transferIns: Seq[(LfContractId, SourceDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = - CheckedT(Future.successful { - logger.trace(s"Transferring-in contracts: $transferIns") - transferIns.to(LazyList).traverse_ { case (contractId, 
sourceDomain, transferCounter, toc) => - updateTable(contractId, _.addTransferIn(contractId, toc, sourceDomain, transferCounter)) - } - }) + ): CheckedT[Future, AcsError, AcsWarning, Unit] = { + logger.trace(s"Transferring-in contracts: $transferIns") + + for { + preparedTransfers <- prepareTransfers(transferIns) + _ <- CheckedT(Future.successful(preparedTransfers.to(LazyList).traverse_ { + case (contractId, sourceDomain, transferCounter, toc) => + updateTable(contractId, _.addTransferIn(contractId, toc, sourceDomain, transferCounter)) + })) + } yield () + } override def transferOutContracts( - transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounterO, TimeOfChange)] + transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = - CheckedT(Future.successful { - logger.trace(s"Transferring-out contracts: $transferOuts") - transferOuts.to(LazyList).traverse_ { case (contractId, targetDomain, transferCounter, toc) => - updateTable(contractId, _.addTransferOut(contractId, toc, targetDomain, transferCounter)) - } - }) + ): CheckedT[Future, AcsError, AcsWarning, Unit] = { + logger.trace(s"Transferring-out contracts: $transferOuts") + + for { + preparedTransfers <- prepareTransfers(transferOuts) + _ <- CheckedT(Future.successful(preparedTransfers.to(LazyList).traverse_ { + case (contractId, sourceDomain, transferCounter, toc) => + updateTable(contractId, _.addTransferOut(contractId, toc, sourceDomain, transferCounter)) + })) + } yield () + } override def doPrune( beforeAndIncluding: CantonTimestamp, @@ -258,9 +353,9 @@ class InMemoryActiveContractStore( s"Provided timestamps are in the wrong order: $fromExclusive and $toInclusive", ) - // obtain the maximum creation timestamp per contract up to a certain rc + // obtain the maximum transfer counter per contract up to a certain rc val latestActivationTransferCounterPerCid - : Map[(LfContractId, RequestCounter), TransferCounterO] = + : Map[(LfContractId, RequestCounter), TransferCounter] = table.toList.flatMap { case (cid, status) => // we only constrain here the upper bound timestamp, because we want to find the // transfer counter of archivals, which might have been activated earlier @@ -277,12 +372,13 @@ class InMemoryActiveContractStore( filterToc .collect { case (ch, detail) if ch.isActivation && ch.toc.rc <= change.toc.rc => - detail.transferCounter + detail.transferCounterO } .maxOption .flatten, ) } + .mapFilter(identity) }.toMap val changesByToc @@ -301,23 +397,21 @@ class InMemoryActiveContractStore( val byTsAndChangeType : Map[TimeOfChange, Map[Boolean, List[(LfContractId, StateChangeType)]]] = changesByToc .fmap(_.groupBy(_._2.isActivation).fmap(_.map { - case (coid, activenessChange, activenessChangeDetail) => - val stateChange = - if (activenessChange.isActivation && !activenessChangeDetail.isTransfer) - StateChangeType(ContractChange.Created, activenessChangeDetail.transferCounter) - else if (activenessChange.isActivation && activenessChangeDetail.isTransfer) - StateChangeType(ContractChange.Assigned, activenessChangeDetail.transferCounter) - else if (!activenessChange.isActivation && activenessChangeDetail.isTransfer) - StateChangeType(ContractChange.Unassigned, activenessChangeDetail.transferCounter) - else - StateChangeType( - ContractChange.Archived, - latestActivationTransferCounterPerCid.getOrElse( - (coid, activenessChange.toc.rc), - activenessChangeDetail.transferCounter, + val stateChange = 
activenessChangeDetail match { + case change: ActivenessChangeDetail.HasTransferCounter => change.toStateChangeType + + case ActivenessChangeDetail.Archive | ActivenessChangeDetail.Purge => + val transferCounter = latestActivationTransferCounterPerCid.getOrElse( + (coid, activenessChange.toc.rc), + throw new IllegalStateException( + s"Unable to find transfer counter for $coid at ${activenessChange.toc}" ), ) + + StateChangeType(ContractChange.Archived, transferCounter) + } + (coid, stateChange) })) @@ -369,23 +463,27 @@ object InMemoryActiveContractStore { */ type IndividualChange = (ActivenessChange, ActivenessChangeDetail) object IndividualChange { - def create(toc: TimeOfChange, transferCounter: TransferCounterO): IndividualChange = - Activation(toc) -> CreationArchivalDetail(transferCounter) + def create(toc: TimeOfChange, transferCounter: TransferCounter): IndividualChange = + Activation(toc) -> ActivenessChangeDetail.Create(transferCounter) + def add(toc: TimeOfChange, transferCounter: TransferCounter): IndividualChange = + Activation(toc) -> ActivenessChangeDetail.Add(transferCounter) def archive(toc: TimeOfChange): IndividualChange = - Deactivation(toc) -> CreationArchivalDetail(None) + Deactivation(toc) -> ActivenessChangeDetail.Archive + def purge(toc: TimeOfChange): IndividualChange = + Deactivation(toc) -> ActivenessChangeDetail.Purge def transferIn( toc: TimeOfChange, - remoteDomain: DomainId, - transferCounter: TransferCounterO, + remoteDomainIdx: Int, + transferCounter: TransferCounter, ): IndividualChange = - Activation(toc) -> TransferDetails(remoteDomain, transferCounter) + Activation(toc) -> ActivenessChangeDetail.TransferIn(transferCounter, remoteDomainIdx) def transferOut( toc: TimeOfChange, - remoteDomain: DomainId, - transferCounter: TransferCounterO, + remoteDomainIdx: Int, + transferCounter: TransferCounter, ): IndividualChange = - Deactivation(toc) -> TransferDetails(remoteDomain, transferCounter) + Deactivation(toc) -> ActivenessChangeDetail.TransferOut(transferCounter, remoteDomainIdx) } final case class ActivenessChange(toc: TimeOfChange, isActivation: Boolean) { @@ -430,125 +528,100 @@ object InMemoryActiveContractStore { * * @param changes The journal of changes that have been recorded for the contract. * Must be ordered by [[ActivenessChange.reverseOrderingForActivenessChange]]. - * @param latestCreation Tracks the latest creation of the contract, if any. - * Used to detect when another change happens before the contract was created. - * If the contract is created several times, only the latest creation is tracked. - * Transfer-ins do not count as creations. - * @param earliestArchival Tracks the earliest archival of the contract, if any. - * Used to detect when another change happens after the contract was archived. - * If the contract is archived several times, only the earliest archive is tracked. - * Transfer-outs do not count as archivals. 
*/ - final case class ContractStatus private ( - changes: ChangeJournal, - latestCreation: Option[TimeOfChange], - earliestArchival: Option[TimeOfChange], - ) { - import IndividualChange.{archive, create} + final case class ContractStatus private (changes: ChangeJournal) { + import IndividualChange.{add, archive, create, purge} + + private def checkNewChangesJournal( + contractId: LfContractId, + toc: TimeOfChange, + newChangesJournal: ChangeJournal, + ): Checked[AcsError, AcsWarning, Unit] = { + def checkedUnit = Checked.unit[AcsError, AcsWarning] + + val changes = newChangesJournal.toVector + // we want earlier changes first + .reverse + .map { case ActivenessChange(toc, _) -> activenessChangeDetail => + (toc, activenessChangeDetail) + } + + NonEmpty.from(changes).fold(checkedUnit) { changes => + NonEmptyChain + .fromSeq(ActivationsDeactivationsConsistencyCheck(contractId, toc, changes)) + .fold(checkedUnit)(Checked.continues) + } + } private[InMemoryActiveContractStore] def addCreation( transferableContract: ActiveContractData, - creation: TimeOfChange, + toc: TimeOfChange, + isCreation: Boolean, ): Checked[AcsError, AcsWarning, ContractStatus] = { val contractId = transferableContract.contractId - - val nextLatestCreation = latestCreation match { - case None => Checked.result(Some(creation)) - case old @ Some(oldToc) if oldToc == creation => Checked.result(old) - case old @ Some(oldToc) => - val newToc = if (creation > oldToc) Some(creation) else old - Checked.continueWithResult(DoubleContractCreation(contractId, oldToc, creation), newToc) - } - - // We don't report earlier changes if a double creation is detected. - val earlierChanges = - if (nextLatestCreation.successful) - changesBefore(Activation(creation)).map(change => - ChangeBeforeCreation(contractId, creation, change.toc) - ) - else List.empty + val change = + if (isCreation) create(toc, transferableContract.transferCounter) + else add(toc, transferableContract.transferCounter) for { - nextChanges <- addIndividualChange( - contractId, - create(creation, transferableContract.transferCounter), - ) - nextLatestCreation <- nextLatestCreation.appendNonaborts(Chain.fromSeq(earlierChanges)) - nextEarliestArchival <- checkTimestampAgainstArchival(contractId, creation) - } yield ContractStatus(nextChanges, nextLatestCreation, nextEarliestArchival) + nextChanges <- addIndividualChange(contractId, change) + _ <- checkNewChangesJournal(contractId, toc, nextChanges) + } yield this.copy(nextChanges) } private[InMemoryActiveContractStore] def addArchival( contractId: LfContractId, - archival: TimeOfChange, + toc: TimeOfChange, + isArchival: Boolean, ): Checked[AcsError, AcsWarning, ContractStatus] = { - val nextEarliestArchival = earliestArchival match { - case None => Checked.result(Some(archival)) - case old @ Some(oldToc) if oldToc == archival => Checked.result(old) - case old @ Some(oldToc) => - val newToc = if (archival < oldToc) Some(archival) else old - Checked.continueWithResult(DoubleContractArchival(contractId, oldToc, archival), newToc) - } - // We don't report later changes if a double archival is detected. 
- val laterChanges = - if (nextEarliestArchival.successful) - changesAfter(Deactivation(archival)).map(change => - ChangeAfterArchival(contractId, archival, change.toc) - ) - else List.empty + val change = if (isArchival) archive(toc) else purge(toc) for { - nextChanges <- addIndividualChange(contractId, archive(archival)) - nextLatestCreation <- checkTimestampAgainstCreation(contractId, archival) - nextEarliestArchival <- nextEarliestArchival.appendNonaborts(Chain.fromSeq(laterChanges)) - } yield ContractStatus(nextChanges, nextLatestCreation, nextEarliestArchival) + nextChanges <- addIndividualChange(contractId, change) + _ <- checkNewChangesJournal(contractId, toc, nextChanges) + } yield this.copy(nextChanges) } private[InMemoryActiveContractStore] def addTransferIn( contractId: LfContractId, transfer: TimeOfChange, - sourceDomain: SourceDomainId, - transferCounter: TransferCounterO, + sourceDomainIdx: Int, + transferCounter: TransferCounter, ): Checked[AcsError, AcsWarning, ContractStatus] = for { nextChanges <- addIndividualChange( contractId, - IndividualChange.transferIn(transfer, sourceDomain.unwrap, transferCounter), + IndividualChange.transferIn(transfer, sourceDomainIdx, transferCounter), ) - _ <- transferCounter.traverse_( - checkTransferCounterIncreases( - contractId, - transfer, - _, - ActiveContractStore.TransferType.TransferIn, - ) + _ <- checkTransferCounterIncreases( + contractId, + transfer, + transferCounter, + ActiveContractStore.TransferType.TransferIn, ) - nextLatestCreation <- checkTimestampAgainstCreation(contractId, transfer) - nextEarliestArchival <- checkTimestampAgainstArchival(contractId, transfer) - } yield ContractStatus(nextChanges, nextLatestCreation, nextEarliestArchival) + _ <- checkNewChangesJournal(contractId, transfer, nextChanges) + } yield this.copy(nextChanges) private[InMemoryActiveContractStore] def addTransferOut( contractId: LfContractId, transfer: TimeOfChange, - targetDomain: TargetDomainId, - transferCounter: TransferCounterO, + targetDomainIdx: Int, + transferCounter: TransferCounter, ): Checked[AcsError, AcsWarning, ContractStatus] = for { nextChanges <- addIndividualChange( contractId, - IndividualChange.transferOut(transfer, targetDomain.unwrap, transferCounter), + IndividualChange.transferOut(transfer, targetDomainIdx, transferCounter), ) - _ <- transferCounter.traverse_( + _ <- checkTransferCounterIncreases( contractId, transfer, - _, + transferCounter, ActiveContractStore.TransferType.TransferOut, ) - ) - nextLatestCreation <- checkTimestampAgainstCreation(contractId, transfer) - nextEarliestArchival <- checkTimestampAgainstArchival(contractId, transfer) - } yield ContractStatus(nextChanges, nextLatestCreation, nextEarliestArchival) + _ <- checkNewChangesJournal(contractId, transfer, nextChanges) + } yield this.copy(nextChanges) private[this] def addIndividualChange( contractId: LfContractId, @@ -562,16 +635,6 @@ object InMemoryActiveContractStore { } } - private[this] def checkTimestampAgainstCreation( - contractId: LfContractId, - toc: TimeOfChange, - ): Checked[AcsError, AcsWarning, Option[TimeOfChange]] = - latestCreation match { - case old @ Some(creation) if toc < creation => - Checked.continueWithResult(ChangeBeforeCreation(contractId, creation, toc), old) - case old => Checked.result(old) - } - private[this] def checkTransferCounterIncreases( contractId: LfContractId, toc: TimeOfChange, @@ -587,7 +650,7 @@ object InMemoryActiveContractStore { change: ActivenessChange ): Option[TransferCounterAtChangeInfo] = 
changes.get(change).map { detail => - ActiveContractStore.TransferCounterAtChangeInfo(change.toc, detail.transferCounter) + ActiveContractStore.TransferCounterAtChangeInfo(change.toc, detail.transferCounterO) } val earliestChangeAfter = @@ -602,7 +665,6 @@ object InMemoryActiveContractStore { toc, transferCounter, latestChangeBefore, - transferType, ) _ <- ActiveContractStore.checkTransferCounterAgainstEarliestAfter( contractId, @@ -614,16 +676,6 @@ object InMemoryActiveContractStore { } yield () } - private[this] def checkTimestampAgainstArchival( - contractId: LfContractId, - toc: TimeOfChange, - ): Checked[AcsError, AcsWarning, Option[TimeOfChange]] = - earliestArchival match { - case old @ Some(archival) if toc > archival => - Checked.continueWithResult(ChangeAfterArchival(contractId, archival, toc), old) - case old => Checked.result(old) - } - private[this] def changesAfter(bound: ActivenessChange): List[ActivenessChange] = { val laterChanges = mutable.SortedSet.newBuilder[ActivenessChange] val iterator = changes.keysIterator @@ -657,19 +709,29 @@ object InMemoryActiveContractStore { /** If the contract is active right after the given `timestamp`, * returns the [[com.digitalasset.canton.data.CantonTimestamp]] of the latest creation or latest transfer-in. */ - def activeBy(timestamp: CantonTimestamp): Option[(CantonTimestamp, TransferCounterO)] = { + def activeBy(timestamp: CantonTimestamp): Option[(CantonTimestamp, TransferCounter)] = { val iter = changes.iteratorFrom(ContractStatus.searchByTimestamp(timestamp)) if (!iter.hasNext) { None } else { val (change, detail) = iter.next() - if (change.isActivation) Some((change.toc.timestamp, detail.transferCounter)) else None + + def changeFor(transferCounter: TransferCounter) = Some( + (change.toc.timestamp, transferCounter) + ) + + detail match { + case TransferIn(transferCounter, _) => changeFor(transferCounter) + case Create(transferCounter) => changeFor(transferCounter) + case Add(transferCounter) => changeFor(transferCounter) + case Archive | _: TransferOut | Purge => None + } } } /** If the contract is active right after the given `rc`, * returns the [[com.digitalasset.canton.RequestCounter]] of the latest creation or latest transfer-in. 
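    * The change with the greatest time of change at or before `rc` decides: deactivations rank
    * above activations at the same time of change, so a contract archived, purged or
    * transferred out at `rc` is not reported as active.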
*/ - def activeBy(rc: RequestCounter): Option[(RequestCounter, TransferCounterO)] = + def activeBy(rc: RequestCounter): Option[(RequestCounter, TransferCounter)] = changes .filter { case (activenessChange, _) => activenessChange.toc.rc <= rc @@ -679,24 +741,18 @@ object InMemoryActiveContractStore { (activenessChange.toc, !activenessChange.isActivation) } .flatMap { case (change, detail) => - Option.when(change.isActivation)((change.toc.rc, detail.transferCounter)) + Option.when(change.isActivation)( + ( + change.toc.rc, + detail.transferCounterO.getOrElse( + throw new IllegalStateException( + s"Active contract should have the transfer counter defined" + ) + ), + ) + ) } - /** Returns the latest [[ActiveContractStore.ContractState]] if any */ - def latestState: Option[ContractState] = { - changes.headOption.map { case (change, detail) => - val status = - if (change.isActivation) Active(detail.transferCounter) - else - detail match { - case TransferDetails(targetDomain, transferCounter) => - TransferredAway(TargetDomainId(targetDomain), transferCounter) - case CreationArchivalDetail(transferCounter) => Archived - } - ContractState(status, change.toc) - } - } - def prune(beforeAndIncluding: CantonTimestamp): Option[ContractStatus] = { changes.keys .filter(change => !change.isActivation && change.toc.timestamp <= beforeAndIncluding) @@ -727,31 +783,16 @@ object InMemoryActiveContractStore { } } - private def contractStatusFromChangeJournal(journal: ChangeJournal): Option[ContractStatus] = { - if (journal.nonEmpty) { - val earliestArchival = journal.collect { - case (change, detail) if !change.isActivation && !detail.isTransfer => change.toc - }.lastOption - val latestCreation = journal.collectFirst { - case (change, details) if change.isActivation && !details.isTransfer => change.toc - } - Some(ContractStatus(journal, latestCreation, earliestArchival)) - } else None - } + private def contractStatusFromChangeJournal(journal: ChangeJournal): Option[ContractStatus] = + Option.when(journal.nonEmpty)(ContractStatus(journal)) } object ContractStatus { - private def apply( - changes: ChangeJournal, - latestCreation: Option[TimeOfChange], - earliestArchival: Option[TimeOfChange], - ) = - new ContractStatus(changes, latestCreation, earliestArchival) + private def apply(changes: ChangeJournal) = new ContractStatus(changes) - val Nonexistent = new ContractStatus(SortedMap.empty, None, None) + val Nonexistent = new ContractStatus(SortedMap.empty) private def searchByTimestamp(timestamp: CantonTimestamp): ActivenessChange = Deactivation(TimeOfChange(RequestCounter.MaxValue, timestamp)) } - } diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRegisteredDomainsStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRegisteredDomainsStore.scala index 030e6b757..cc4151f25 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRegisteredDomainsStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRegisteredDomainsStore.scala @@ -33,31 +33,36 @@ class InMemoryRegisteredDomainsStore(override protected val loggerFactory: Named override def addMapping(alias: DomainAlias, domainId: DomainId)(implicit traceContext: TraceContext - ): EitherT[Future, Error, Unit] = blocking(lock.synchronized { - val swapped = for { - _ <- Option(domainAliasMap.get(alias)).fold(Either.right[Either[Error, Unit], Unit](())) 
{ - oldDomainId => - Left( - Either.cond(oldDomainId == domainId, (), DomainAliasAlreadyAdded(alias, oldDomainId)) - ) - } - _ <- Option(domainAliasMap.inverse.get(domainId)) - .fold(Either.right[Either[Error, Unit], Unit](())) { oldAlias => - Left(Either.cond(oldAlias == alias, (), DomainIdAlreadyAdded(domainId, oldAlias))) + ): EitherT[Future, Error, Unit] = { + val swapped = blocking(lock.synchronized { + for { + _ <- Option(domainAliasMap.get(alias)).fold(Either.right[Either[Error, Unit], Unit](())) { + oldDomainId => + Left( + Either.cond(oldDomainId == domainId, (), DomainAliasAlreadyAdded(alias, oldDomainId)) + ) } - } yield { - val _ = domainAliasMap.put(alias, domainId) - } + _ <- Option(domainAliasMap.inverse.get(domainId)) + .fold(Either.right[Either[Error, Unit], Unit](())) { oldAlias => + Left(Either.cond(oldAlias == alias, (), DomainIdAlreadyAdded(domainId, oldAlias))) + } + } yield { + val _ = domainAliasMap.put(alias, domainId) + } + }) EitherT.fromEither[Future](swapped.swap.getOrElse(Right(()))) - }) + } override def aliasToDomainIdMap(implicit traceContext: TraceContext - ): Future[Map[DomainAlias, DomainId]] = blocking { - lock.synchronized { - import scala.jdk.CollectionConverters.* - Future.successful(Map(domainAliasMap.asScala.toSeq*)) + ): Future[Map[DomainAlias, DomainId]] = { + val map = blocking { + lock.synchronized { + import scala.jdk.CollectionConverters.* + Map(domainAliasMap.asScala.toSeq*) + } } + Future.successful(map) } override def close(): Unit = () diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRequestJournalStore.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRequestJournalStore.scala index c4428a504..d0ca6786f 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRequestJournalStore.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemoryRequestJournalStore.scala @@ -67,23 +67,27 @@ class InMemoryRequestJournalStore(protected val loggerFactory: NamedLoggerFactor ), ) ) - else - blocking(requestTable.synchronized { + else { + // TODO(#17726) Why do we need the synchronized block here? The TrieMap is already thread-safe + // and other places in this store do not synchronize when they access the requestTable. 
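+      // A possible reason (unverified): the get-compare-put sequence below must stay atomic so that
+      // two concurrent replace calls for the same request counter cannot interleave between the
+      // consistency check and the update.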
+ val resultE = blocking(requestTable.synchronized { requestTable.get(rc) match { - case None => EitherT.leftT(UnknownRequestCounter(rc)) + case None => Left(UnknownRequestCounter(rc)) case Some(oldResult) => if (oldResult.requestTimestamp != requestTimestamp) - EitherT.leftT( + Left( InconsistentRequestTimestamp(rc, oldResult.requestTimestamp, requestTimestamp) ) else if (oldResult.state == newState && oldResult.commitTime == commitTime) - EitherT.pure(()) + Right(()) else { requestTable.put(rc, oldResult.tryAdvance(newState, commitTime)).discard - EitherT.rightT(()) + Right(()) } } }) + EitherT.fromEither(resultE) + } def delete(rc: RequestCounter)(implicit traceContext: TraceContext): Future[Unit] = { val oldState = requestTable.remove(rc) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala index d600553da..c7d6fbcd4 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/store/memory/InMemorySyncDomainPersistentState.scala @@ -10,12 +10,12 @@ import com.digitalasset.canton.logging.NamedLoggerFactory import com.digitalasset.canton.participant.store.EventLogId.DomainEventLogId import com.digitalasset.canton.participant.store.SyncDomainPersistentState import com.digitalasset.canton.protocol.TargetDomainId -import com.digitalasset.canton.store.IndexedDomain import com.digitalasset.canton.store.memory.{ InMemorySendTrackerStore, InMemorySequencedEventStore, InMemorySequencerCounterTrackerStore, } +import com.digitalasset.canton.store.{IndexedDomain, IndexedStringStore} import com.digitalasset.canton.time.Clock import com.digitalasset.canton.topology.store.TopologyStoreId.DomainStore import com.digitalasset.canton.topology.store.memory.InMemoryTopologyStoreX @@ -24,13 +24,14 @@ import com.digitalasset.canton.version.ProtocolVersion import scala.concurrent.ExecutionContext -class InMemorySyncDomainPersistentStateX( +class InMemorySyncDomainPersistentState( clock: Clock, crypto: Crypto, override val domainId: IndexedDomain, val protocolVersion: ProtocolVersion, override val enableAdditionalConsistencyChecks: Boolean, enableTopologyTransactionValidation: Boolean, + indexedStringStore: IndexedStringStore, val loggerFactory: NamedLoggerFactory, val timeouts: ProcessingTimeout, val futureSupervisor: FutureSupervisor, @@ -41,7 +42,8 @@ class InMemorySyncDomainPersistentStateX( val eventLog = new InMemorySingleDimensionEventLog(DomainEventLogId(domainId), loggerFactory) val contractStore = new InMemoryContractStore(loggerFactory) - val activeContractStore = new InMemoryActiveContractStore(protocolVersion, loggerFactory) + val activeContractStore = + new InMemoryActiveContractStore(indexedStringStore, protocolVersion, loggerFactory) val transferStore = new InMemoryTransferStore(TargetDomainId(domainId.item), loggerFactory) val sequencedEventStore = new InMemorySequencedEventStore(loggerFactory) val requestJournalStore = new InMemoryRequestJournalStore(loggerFactory) diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala index b5676b425..18a8a7f06 100644 --- 
a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/CantonSyncService.scala @@ -19,6 +19,9 @@ import com.daml.nameof.NameOf.functionFullName import com.daml.nonempty.NonEmpty import com.digitalasset.canton.* import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader +import com.digitalasset.canton.common.domain.grpc.SequencerInfoLoader.{ + LoadSequencerEndpointInformationResult +} import com.digitalasset.canton.concurrent.FutureSupervisor import com.digitalasset.canton.config.CantonRequireTypes.String256M import com.digitalasset.canton.config.ProcessingTimeout @@ -39,14 +42,7 @@ import com.digitalasset.canton.ledger.error.{CommonErrors, PackageServiceErrors} import com.digitalasset.canton.ledger.participant.state import com.digitalasset.canton.ledger.participant.state.v2.ReadService.ConnectedDomainResponse import com.digitalasset.canton.ledger.participant.state.v2.* -import com.digitalasset.canton.lifecycle.{ - CloseContext, - FlagCloseable, - FutureUnlessShutdown, - HasCloseContext, - Lifecycle, - UnlessShutdown, -} +import com.digitalasset.canton.lifecycle.* import com.digitalasset.canton.logging.{ErrorLoggingContext, NamedLoggerFactory, NamedLogging} import com.digitalasset.canton.networking.grpc.CantonGrpcUtil.GrpcErrors import com.digitalasset.canton.participant.Pruning.* @@ -74,7 +70,11 @@ import com.digitalasset.canton.participant.protocol.transfer.{ IncompleteTransferData, TransferCoordination, } -import com.digitalasset.canton.participant.pruning.{NoOpPruningProcessor, PruningProcessor} +import com.digitalasset.canton.participant.pruning.{ + AcsCommitmentProcessor, + NoOpPruningProcessor, + PruningProcessor, +} import com.digitalasset.canton.participant.store.DomainConnectionConfigStore.MissingConfigForAlias import com.digitalasset.canton.participant.store.MultiDomainEventLog.PublicationData import com.digitalasset.canton.participant.store.* @@ -94,6 +94,7 @@ import com.digitalasset.canton.protocol.* import com.digitalasset.canton.resource.DbStorage.PassiveInstanceException import com.digitalasset.canton.resource.Storage import com.digitalasset.canton.scheduler.Schedulers +import com.digitalasset.canton.sequencing.SequencerConnectionValidation import com.digitalasset.canton.sequencing.client.SequencerClient import com.digitalasset.canton.sequencing.client.SequencerClient.CloseReason import com.digitalasset.canton.store.IndexedStringStore @@ -117,7 +118,7 @@ import java.util.concurrent.CompletionStage import java.util.concurrent.atomic.AtomicReference import scala.collection.concurrent.TrieMap import scala.concurrent.duration.Duration -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.{ExecutionContextExecutor, Future} import scala.jdk.FutureConverters.* import scala.util.chaining.scalaUtilChainingOps import scala.util.{Failure, Right, Success} @@ -160,7 +161,7 @@ class CantonSyncService( val isActive: () => Boolean, futureSupervisor: FutureSupervisor, protected val loggerFactory: NamedLoggerFactory, -)(implicit ec: ExecutionContext, mat: Materializer, val tracer: Tracer) +)(implicit ec: ExecutionContextExecutor, mat: Materializer, val tracer: Tracer) extends state.v2.WriteService with WriteParticipantPruningService with state.v2.ReadService @@ -178,6 +179,8 @@ class CantonSyncService( MutableHealthComponent(loggerFactory, SyncDomainEphemeralState.healthName, timeouts) val sequencerClientHealth: 
MutableHealthComponent = MutableHealthComponent(loggerFactory, SequencerClient.healthName, timeouts) + val acsCommitmentProcessorHealth: MutableHealthComponent = + MutableHealthComponent(loggerFactory, AcsCommitmentProcessor.healthName, timeouts) val maxDeduplicationDuration: NonNegativeFiniteDuration = participantNodePersistentState.value.settingsStore.settings.maxDeduplicationDuration @@ -780,23 +783,45 @@ class CantonSyncService( * @return Error or unit. */ def addDomain( - config: DomainConnectionConfig - )(implicit traceContext: TraceContext): EitherT[Future, SyncServiceError, Unit] = { - domainConnectionConfigStore - .put(config, DomainConnectionConfigStore.Active) - .leftMap(e => SyncServiceError.SyncServiceAlreadyAdded.Error(e.alias)) - } + config: DomainConnectionConfig, + sequencerConnectionValidation: SequencerConnectionValidation, + )(implicit traceContext: TraceContext): EitherT[Future, SyncServiceError, Unit] = + for { + _ <- validateSequencerConnection(config, sequencerConnectionValidation) + _ <- domainConnectionConfigStore + .put(config, DomainConnectionConfigStore.Active) + .leftMap(e => SyncServiceError.SyncServiceAlreadyAdded.Error(e.alias): SyncServiceError) + } yield () + + private def validateSequencerConnection( + config: DomainConnectionConfig, + sequencerConnectionValidation: SequencerConnectionValidation, + )(implicit + traceContext: TraceContext + ): EitherT[Future, SyncServiceError, Unit] = + sequencerInfoLoader + .validateSequencerConnection( + config.domain, + config.domainId, + config.sequencerConnections, + sequencerConnectionValidation, + ) + .leftMap(SyncServiceError.SyncServiceInconsistentConnectivity.Error(_): SyncServiceError) /** Modifies the settings of the sync-service's configuration * * NOTE: This does not automatically reconnect the sync service. 
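    * The updated configuration is persisted in the domain connection config store and is
    * expected to take effect only once the domain is reconnected.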
*/ def modifyDomain( - config: DomainConnectionConfig + config: DomainConnectionConfig, + sequencerConnectionValidation: SequencerConnectionValidation, )(implicit traceContext: TraceContext): EitherT[Future, SyncServiceError, Unit] = - domainConnectionConfigStore - .replace(config) - .leftMap(e => SyncServiceError.SyncServiceUnknownDomain.Error(e.alias)) + for { + _ <- validateSequencerConnection(config, sequencerConnectionValidation) + _ <- domainConnectionConfigStore + .replace(config) + .leftMap(e => SyncServiceError.SyncServiceUnknownDomain.Error(e.alias): SyncServiceError) + } yield () def migrateDomain( source: DomainAlias, @@ -813,7 +838,11 @@ class CantonSyncService( for { targetDomainInfo <- performUnlessClosingEitherU(functionFullName)( sequencerInfoLoader - .loadSequencerEndpoints(target.domain, target.sequencerConnections)( + .loadAndAggregateSequencerEndpoints( + target.domain, + target.sequencerConnections, + SequencerConnectionValidation.Active, + )( traceContext, CloseContext(this), ) @@ -1331,6 +1360,7 @@ class CantonSyncService( _ = syncDomainHealth.set(syncDomain) _ = ephemeralHealth.set(syncDomain.ephemeral) _ = sequencerClientHealth.set(syncDomain.sequencerClient.healthComponent) + _ = acsCommitmentProcessorHealth.set(syncDomain.acsCommitmentProcessor.healthComponent) _ = syncDomain.resolveUnhealthy() _ = connectedDomainsMap += (domainId -> syncDomain) @@ -1569,6 +1599,7 @@ class CantonSyncService( syncDomainHealth, ephemeralHealth, sequencerClientHealth, + acsCommitmentProcessorHealth, ) Lifecycle.close(instances*)(logger) @@ -1812,7 +1843,7 @@ object CantonSyncService { sequencerInfoLoader: SequencerInfoLoader, futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, - )(implicit ec: ExecutionContext, mat: Materializer, tracer: Tracer): T + )(implicit ec: ExecutionContextExecutor, mat: Materializer, tracer: Tracer): T } object DefaultFactory extends Factory[CantonSyncService] { @@ -1842,7 +1873,7 @@ object CantonSyncService { futureSupervisor: FutureSupervisor, loggerFactory: NamedLoggerFactory, )(implicit - ec: ExecutionContext, + ec: ExecutionContextExecutor, mat: Materializer, tracer: Tracer, ): CantonSyncService = @@ -1958,6 +1989,29 @@ object SyncServiceError extends SyncServiceErrorGroup { with SyncServiceError } + @Explanation( + """This error is reported in case of validation failures when attempting to register new or change existing + sequencer connections. This can be caused by unreachable nodes, a bad TLS configuration, or in case of + a mismatch of domain-ids reported by the sequencers or mismatched sequencer-ids within a sequencer group.""" + ) + @Resolution( + """Check that the connection settings provided are correct. If they are but correspond to temporarily + inactive sequencers, you may also turn off the validation. + """ + ) + object SyncServiceInconsistentConnectivity + extends ErrorCode( + "SYNC_SERVICE_BAD_CONNECTIVITY", + ErrorCategory.InvalidGivenCurrentSystemStateOther, + ) { + final case class Error(errors: Seq[LoadSequencerEndpointInformationResult.NotValid])(implicit + val loggingContext: ErrorLoggingContext + ) extends CantonError.Impl( + cause = s"The provided sequencer connections are inconsistent: ${errors}." 
+ ) + with SyncServiceError + } + abstract class MigrationErrors extends ErrorGroup() abstract class DomainRegistryErrorGroup extends ErrorGroup() diff --git a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala index 2a09776fe..4d4c270c9 100644 --- a/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala +++ b/community/participant/src/main/scala/com/digitalasset/canton/participant/sync/SyncDomain.scala @@ -242,7 +242,7 @@ class SyncDomain( loggerFactory, ) - private val acsCommitmentProcessor = { + private[canton] val acsCommitmentProcessor = { val listener = new AcsCommitmentProcessor( domainId, participantId, @@ -253,7 +253,6 @@ class SyncDomain( journalGarbageCollector.observer, pruningMetrics, staticDomainParameters.protocolVersion, - staticDomainParameters.acsCommitmentsCatchUp, timeouts, futureSupervisor, persistent.activeContractStore, @@ -437,8 +436,8 @@ class SyncDomain( val changeWithAdjustedTransferCountersForUnassignments = ActiveContractIdsChange( change.activations, change.deactivations.fmap { - case StateChangeType(ContractChange.Unassigned, transferCounter) => - StateChangeType(ContractChange.Unassigned, transferCounter.map(_ - 1)) + case StateChangeType(ContractChange.TransferredOut, transferCounter) => + StateChangeType(ContractChange.TransferredOut, transferCounter - 1) case change => change }, ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/data/GeneratorsData.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/data/GeneratorsData.scala index 63fdc960c..4b4441e9e 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/data/GeneratorsData.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/data/GeneratorsData.scala @@ -22,7 +22,7 @@ final class GeneratorsData( Arbitrary(for { domainId <- Arbitrary.arbitrary[DomainId] contract <- serializableContractArb(canHaveEmptyKey = true).arbitrary - transferCounter <- transferCounterOGen + transferCounter <- transferCounterGen ac = ActiveContract.create(domainId, contract, transferCounter)(protocolVersion) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala index a48e382b4..b76710e3e 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/admin/inspection/AcsInspectionTest.scala @@ -166,7 +166,7 @@ object AcsInspectionTest extends MockitoSugar with ArgumentMatchersSugar { val allContractIds = contracts.keys ++ missingContracts - val snapshot = allContractIds.map(_ -> (CantonTimestamp.Epoch, Option.empty[TransferCounter])) + val snapshot = allContractIds.map(_ -> (CantonTimestamp.Epoch, TransferCounter.Genesis)) val acs = mock[ActiveContractStore] when(acs.snapshot(any[CantonTimestamp])) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala index fc2ac84bd..63e9aa5f5 100644 --- 
a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/ProtocolProcessorTest.scala @@ -229,13 +229,14 @@ class ProtocolProcessorTest val multiDomainEventLog = mock[MultiDomainEventLog] val clock = new WallClock(timeouts, loggerFactory) val persistentState = - new InMemorySyncDomainPersistentStateX( + new InMemorySyncDomainPersistentState( clock, crypto.crypto, IndexedDomain.tryCreate(domain, 1), testedProtocolVersion, enableAdditionalConsistencyChecks = true, enableTopologyTransactionValidation = false, + new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 1), // only one domain needed loggerFactory, timeouts, futureSupervisor, diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala index 13f72a447..a38355b72 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectionHelpers.scala @@ -9,6 +9,7 @@ import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting} import com.digitalasset.canton.participant.store.ActiveContractStore.{ Active, Archived, + Purged, TransferredAway, } import com.digitalasset.canton.participant.store.memory.{ @@ -24,6 +25,7 @@ import com.digitalasset.canton.participant.store.{ import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.* import com.digitalasset.canton.sequencing.protocol.MediatorsOfDomain +import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.topology.DomainId import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.{ @@ -32,7 +34,6 @@ import com.digitalasset.canton.{ LfPartyId, ScalaFuturesWithPatience, TransferCounter, - TransferCounterO, } import org.scalatest.AsyncTestSuite @@ -45,8 +46,10 @@ private[protocol] trait ConflictDetectionHelpers { def parallelExecutionContext: ExecutionContext = executorService + private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 2) + def mkEmptyAcs(): ActiveContractStore = - new InMemoryActiveContractStore(testedProtocolVersion, loggerFactory)( + new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory)( parallelExecutionContext ) @@ -83,8 +86,7 @@ private[protocol] trait ConflictDetectionHelpers { private[protocol] object ConflictDetectionHelpers extends ScalaFuturesWithPatience { - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis def insertEntriesAcs( acs: ActiveContractStore, @@ -94,10 +96,10 @@ private[protocol] object ConflictDetectionHelpers extends ScalaFuturesWithPatien .traverse(entries) { case (coid, toc, Active(_transferCounter)) => acs - .markContractActive(coid -> initialTransferCounter, toc) + .markContractCreated(coid -> initialTransferCounter, toc) .value - case (coid, toc, Archived) => - acs.archiveContract(coid, toc).value + case (coid, toc, Archived) => acs.archiveContract(coid, toc).value + case (coid, toc, Purged) => 
acs.purgeContracts(Seq(coid), toc).value case (coid, toc, TransferredAway(targetDomain, transferCounter)) => acs.transferOutContract(coid, toc, targetDomain, transferCounter).value } @@ -183,7 +185,7 @@ private[protocol] object ConflictDetectionHelpers extends ScalaFuturesWithPatien def mkCommitSet( arch: Set[LfContractId] = Set.empty, create: Set[LfContractId] = Set.empty, - tfOut: Map[LfContractId, (DomainId, TransferCounterO)] = Map.empty, + tfOut: Map[LfContractId, (DomainId, TransferCounter)] = Map.empty, tfIn: Map[LfContractId, TransferId] = Map.empty, ): CommitSet = { val contractHash = ExampleTransactionFactory.lfHash(0) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala index c34082b12..23abd2843 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/ConflictDetectorTest.scala @@ -50,13 +50,7 @@ import com.digitalasset.canton.protocol.{ExampleTransactionFactory, LfContractId import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.util.{Checked, CheckedT} import com.digitalasset.canton.version.HasTestCloseContext -import com.digitalasset.canton.{ - BaseTest, - HasExecutorService, - RequestCounter, - TransferCounter, - TransferCounterO, -} +import com.digitalasset.canton.{BaseTest, HasExecutorService, RequestCounter, TransferCounter} import org.scalactic.source import org.scalatest.Assertion import org.scalatest.wordspec.AsyncWordSpec @@ -86,10 +80,9 @@ class ConflictDetectorTest private val transfer1 = TransferId(sourceDomain1, Epoch) private val transfer2 = TransferId(sourceDomain2, Epoch) - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) - private val transferCounter1 = initialTransferCounter.map(_ + 1) - private val transferCounter2 = initialTransferCounter.map(_ + 2) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis + private val transferCounter1 = initialTransferCounter + 1 + private val transferCounter2 = initialTransferCounter + 2 private val active = Active(initialTransferCounter) @@ -264,7 +257,7 @@ class ConflictDetectorTest cd = mkCd(acs) cr <- prefetchAndCheck(cd, rc, mkActivenessSet(create = Set(coid00))) - _ = acs.setCreateHook { (_, _) => + _ = acs.setCreateAddHook { (_, _) => // Insert the same request with a different activeness set while the ACS updates happen loggerFactory .assertInternalErrorAsync[IllegalConflictDetectionStateException]( @@ -319,7 +312,7 @@ class ConflictDetectorTest _ = checkContractState(cd, coid22, 0, 1, 0)(s"lock contract $coid22 for creation") toc = TimeOfChange(rc, ofEpochMilli(2)) - _ = acs.setCreateHook { (coids, ToC) => + _ = acs.setCreateAddHook { (coids, ToC) => Future.successful { assert(coids.toSet == Set(coid21 -> initialTransferCounter) && ToC == toc) checkContractState(cd, coid21, active, toc, 0, 0, 1)(s"Contract $coid01 is active") @@ -328,7 +321,7 @@ class ConflictDetectorTest ) } } - _ = acs.setArchiveHook { (coids, ToC) => + _ = acs.setArchivePurgeHook { (coids, ToC) => Future.successful { assert(coids.toSet == Set(coid00) && ToC == toc) checkContractState(cd, coid00, Archived, toc, 0, 0, 1)( @@ -467,7 +460,7 @@ class 
ConflictDetectorTest _ = checkContractState(cd, coid21, 1, 1 + 1, 0)(s"Contract $coid21 in creation is locked") // Check that the in-memory states of contracts are as expected after finalizing the first request, but before the updates are persisted - _ = acs.setCreateHook { (_, _) => + _ = acs.setCreateAddHook { (_, _) => Future.successful { checkContractState(cd, coid20, active, toc, 0, 1, 1)(s"Contract $coid20 remains locked") checkContractState(cd, coid21, 1, 1, 0)( @@ -475,7 +468,7 @@ class ConflictDetectorTest ) } } - _ = acs.setArchiveHook { (_, _) => + _ = acs.setArchivePurgeHook { (_, _) => Future.successful { checkContractState(cd, coid00, active, toc0, 1, 1, 0)(s"$coid00 remains locked once") checkContractState(cd, coid11, Archived, toc, 1, 0, 1)(s"$coid11 is being archived") @@ -552,7 +545,7 @@ class ConflictDetectorTest ) // Finalize first request and make sure that the in-memory states are up to date while the ACS updates are being written - _ = acs.setCreateHook { (_, _) => + _ = acs.setCreateAddHook { (_, _) => Future.successful { checkContractState(cd, coid01, active, toc1, 0, 1, 1)( s"Contract $coid01 is being created" @@ -649,7 +642,7 @@ class ConflictDetectorTest } // Finalize second request - _ = acs.setArchiveHook { (_, _) => + _ = acs.setArchivePurgeHook { (_, _) => Future.successful { checkContractState(cd, coid00, Archived, toc1, 0, 1, 1)( s"Archival for $coid00 retains the lock for the other request" @@ -685,7 +678,7 @@ class ConflictDetectorTest _ = assert(cr2 == mkActivenessResult(locked = Set(coid10))) // Finalize first request - _ = acs.setArchiveHook { (_, _) => + _ = acs.setArchivePurgeHook { (_, _) => Future.successful { checkContractState(cd, coid00, Archived, toc1, 0, 0, 1)( s"Double archived contract $coid00 has a pending write" @@ -771,11 +764,9 @@ class ConflictDetectorTest .flatten .failOnShutdown _ = assert( - fin1.leftMap(_.toList.toSet) == Left( - Set( - AcsError(DoubleContractCreation(coid01, toc0, toc1)), - AcsError(ChangeAfterArchival(coid01, toc0, toc1)), - ) + fin1.left.value.toList.toSet == Set( + AcsError(DoubleContractCreation(coid01, toc0, toc1)), + AcsError(ChangeAfterArchival(coid01, toc0, toc1)), ) ) _ <- checkContractState(acs, coid10, (active, toc1))(s"contract $coid10 is created") @@ -874,7 +865,7 @@ class ConflictDetectorTest } // Finalize first request - _ = acs.setCreateHook { (_, _) => + _ = acs.setCreateAddHook { (_, _) => Future.successful { checkContractState(cd, coid00, Archived, toc1, 0, 0, 1)( s"Contract $coid00 has a pending creation, but remains archived" @@ -1038,7 +1029,7 @@ class ConflictDetectorTest ) // Finalize the second request - _ = acs.setArchiveHook((_, _) => + _ = acs.setArchivePurgeHook((_, _) => finalizeForthRequest(cd) ) // This runs while request 1's ACS updates are written finF1 <- cd.finalizeRequest(commitSet1, toc1).failOnShutdown @@ -1105,7 +1096,7 @@ class ConflictDetectorTest _ = cr3 shouldBe actRes3 // Finalize the first request and do a lot of stuff while the updates are being written - _ = acs.setArchiveHook((_, _) => storeHookRequest0(cd, acs)) + _ = acs.setArchivePurgeHook((_, _) => storeHookRequest0(cd, acs)) finF0 <- cd.finalizeRequest(commitSet0, toc0).failOnShutdown _ = finF0Complete.success(()) fin0 <- finF0.failOnShutdown diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/RequestTrackerTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/RequestTrackerTest.scala index 
9d3963608..14dfadc63 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/RequestTrackerTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/conflictdetection/RequestTrackerTest.scala @@ -14,13 +14,7 @@ import com.digitalasset.canton.participant.store.ActiveContractStore.* import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.{ExampleTransactionFactory, LfContractId} import com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.{ - BaseTest, - RequestCounter, - SequencerCounter, - TransferCounter, - TransferCounterO, -} +import com.digitalasset.canton.{BaseTest, RequestCounter, SequencerCounter, TransferCounter} import org.scalatest.Assertion import org.scalatest.wordspec.AsyncWordSpec @@ -37,8 +31,7 @@ private[conflictdetection] trait RequestTrackerTest { val coid10: LfContractId = ExampleTransactionFactory.suffixedId(1, 0) val coid11: LfContractId = ExampleTransactionFactory.suffixedId(1, 1) - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis private val active = Active(initialTransferCounter) @@ -1248,7 +1241,7 @@ private[conflictdetection] trait RequestTrackerTest { protected def checkSnapshot( acs: ActiveContractStore, ts: CantonTimestamp, - expected: Map[LfContractId, (CantonTimestamp, TransferCounterO)], + expected: Map[LfContractId, (CantonTimestamp, TransferCounter)], ): Future[Assertion] = acs .snapshot(ts) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala index 5ba8ede18..b4ebec00f 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInProcessingStepsTest.scala @@ -45,6 +45,7 @@ import com.digitalasset.canton.protocol.ExampleTransactionFactory.{submitter, su import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.messages.* import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.store.{IndexedDomain, SessionKeyStore} import com.digitalasset.canton.time.{DomainTimeTracker, TimeProofTestUtil, WallClock} import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex @@ -80,8 +81,7 @@ class TransferInProcessingStepsTest extends AsyncWordSpec with BaseTest with Has UniqueIdentifier.tryFromProtoPrimitive("bothdomains::participant") ) - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis private def submitterInfo(submitter: LfPartyId): TransferSubmitterMetadata = { TransferSubmitterMetadata( @@ -94,7 +94,7 @@ class TransferInProcessingStepsTest extends AsyncWordSpec with BaseTest with Has ) } - private val identityFactory = TestingTopologyX() + private lazy val identityFactory = TestingTopologyX() .withDomains(sourceDomain.unwrap) .withReversedTopology( Map(submittingParticipant -> Map(party1 -> ParticipantPermission.Submission)) @@ -102,7 +102,7 
@@ class TransferInProcessingStepsTest extends AsyncWordSpec with BaseTest with Has .withSimpleParticipants(participant) // required such that `participant` gets a signing key .build(loggerFactory) - private val cryptoSnapshot = + private lazy val cryptoSnapshot = identityFactory .forOwnerAndDomain(submittingParticipant, sourceDomain.unwrap) .currentSnapshotApproximation @@ -112,20 +112,23 @@ class TransferInProcessingStepsTest extends AsyncWordSpec with BaseTest with Has private val seedGenerator = new SeedGenerator(crypto.pureCrypto) - private val transferInProcessingSteps = + private lazy val transferInProcessingSteps = testInstance(targetDomain, Set(party1), Set(party1), cryptoSnapshot, None) + private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 1) + private def statefulDependencies : Future[(SyncDomainPersistentState, SyncDomainEphemeralState)] = { val multiDomainEventLog = mock[MultiDomainEventLog] val persistentState = - new InMemorySyncDomainPersistentStateX( + new InMemorySyncDomainPersistentState( clock, crypto, IndexedDomain.tryCreate(targetDomain.unwrap, 1), testedProtocolVersion, enableAdditionalConsistencyChecks = true, enableTopologyTransactionValidation = false, + indexedStringStore = indexedStringStore, loggerFactory = loggerFactory, timeouts = timeouts, futureSupervisor = futureSupervisor, diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidationTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidationTest.scala index a240074cf..8d6b11ee3 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidationTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferInValidationTest.scala @@ -50,8 +50,7 @@ class TransferInValidationTest UniqueIdentifier.tryFromProtoPrimitive("bothdomains::participant") ) - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis private def submitterInfo(submitter: LfPartyId): TransferSubmitterMetadata = { TransferSubmitterMetadata( @@ -222,7 +221,7 @@ class TransferInValidationTest targetDomain, targetMediator, transferOutResult, - transferCounter = transferData.transferCounter.map(_ + 1), + transferCounter = transferData.transferCounter + 1, ) for { result <- @@ -309,7 +308,7 @@ class TransferInValidationTest targetMediator: MediatorsOfDomain, transferOutResult: DeliveredTransferOutResult, uuid: UUID = new UUID(4L, 5L), - transferCounter: TransferCounterO = initialTransferCounter, + transferCounter: TransferCounter = initialTransferCounter, ): FullTransferInTree = { val seed = seedGenerator.generateSaltSeed() valueOrFail( diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingStepsTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingStepsTest.scala index ee13370a1..7a3c0f381 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingStepsTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutProcessingStepsTest.scala @@ -40,6 +40,7 @@ import 
com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.* import com.digitalasset.canton.protocol.messages.* import com.digitalasset.canton.sequencing.protocol.* +import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.store.{IndexedDomain, SessionKeyStore} import com.digitalasset.canton.time.{DomainTimeTracker, TimeProofTestUtil, WallClock} import com.digitalasset.canton.topology.MediatorGroup.MediatorGroupIndex @@ -65,7 +66,6 @@ import com.digitalasset.canton.{ RequestCounter, SequencerCounter, TransferCounter, - TransferCounterO, } import org.scalatest.Assertion import org.scalatest.wordspec.AsyncWordSpec @@ -110,8 +110,7 @@ final class TransferOutProcessingStepsTest private val packageName = LfPackageName.assertFromString("transferoutprocessingstepstestpackagename") - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis private def submitterMetadata(submitter: LfPartyId): TransferSubmitterMetadata = { TransferSubmitterMetadata( @@ -128,16 +127,18 @@ final class TransferOutProcessingStepsTest private val crypto = TestingIdentityFactoryX.newCrypto(loggerFactory)(submittingParticipant) - private val multiDomainEventLog = mock[MultiDomainEventLog] + private lazy val multiDomainEventLog = mock[MultiDomainEventLog] private val clock = new WallClock(timeouts, loggerFactory) - private val persistentState = - new InMemorySyncDomainPersistentStateX( + private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 1) + private lazy val persistentState = + new InMemorySyncDomainPersistentState( clock, crypto, IndexedDomain.tryCreate(sourceDomain.unwrap, 1), testedProtocolVersion, enableAdditionalConsistencyChecks = true, enableTopologyTransactionValidation = false, + indexedStringStore = indexedStringStore, loggerFactory, timeouts, futureSupervisor, @@ -581,7 +582,7 @@ final class TransferOutProcessingStepsTest contract, ) _ <- persistentState.activeContractStore - .markContractsActive( + .markContractsCreated( Seq(contractId -> initialTransferCounter), TimeOfChange(RequestCounter(1), timeEvent.timestamp), ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationTest.scala index 2e5860493..22c8e9afb 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/protocol/transfer/TransferOutValidationTest.scala @@ -44,8 +44,7 @@ class TransferOutValidationTest private val participant = ParticipantId.tryFromProtoPrimitive("PAR::bothdomains::participant") - private val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis private def submitterInfo(submitter: LfPartyId): TransferSubmitterMetadata = { TransferSubmitterMetadata( @@ -151,7 +150,7 @@ class TransferOutValidationTest newStakeholders: Set[LfPartyId], sourceProtocolVersion: SourceProtocolVersion, expectedTemplateId: LfTemplateId, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ): EitherT[FutureUnlessShutdown, TransferProcessorError, Unit] = { val 
transferOutRequest = TransferOutRequest( submitterInfo(submitterParty1), diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala index f83576a4e..f50e20bc4 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/pruning/AcsCommitmentProcessorTest.scala @@ -13,6 +13,7 @@ import com.digitalasset.canton.config.RequireTypes.{PositiveInt, PositiveNumeric import com.digitalasset.canton.config.{DefaultProcessingTimeouts, NonNegativeDuration} import com.digitalasset.canton.crypto.* import com.digitalasset.canton.data.{CantonTimestamp, CantonTimestampSecond} +import com.digitalasset.canton.logging.LogEntry import com.digitalasset.canton.participant.event.{ AcsChange, ContractMetadataAndTransferCounter, @@ -30,6 +31,10 @@ import com.digitalasset.canton.participant.protocol.conflictdetection.CommitSet. } import com.digitalasset.canton.participant.protocol.submission.* import com.digitalasset.canton.participant.pruning +import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.Errors.DegradationError.{ + AcsCommitmentDegradation, + AcsCommitmentDegradationWithIneffectiveConfig, +} import com.digitalasset.canton.participant.pruning.AcsCommitmentProcessor.{ CachedCommitments, CommitmentSnapshot, @@ -49,14 +54,16 @@ import com.digitalasset.canton.protocol.messages.* import com.digitalasset.canton.sequencing.client.* import com.digitalasset.canton.sequencing.protocol.* import com.digitalasset.canton.store.CursorPrehead -import com.digitalasset.canton.store.memory.InMemorySequencerCounterTrackerStore +import com.digitalasset.canton.store.memory.{ + InMemoryIndexedStringStore, + InMemorySequencerCounterTrackerStore, +} import com.digitalasset.canton.time.PositiveSeconds import com.digitalasset.canton.topology.* import com.digitalasset.canton.topology.transaction.ParticipantPermission import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.FutureInstances.* import com.digitalasset.canton.version.HasTestCloseContext -import com.google.protobuf.ByteString import org.scalatest.Assertion import org.scalatest.wordspec.{AnyWordSpec, AsyncWordSpec} @@ -105,9 +112,10 @@ sealed trait AcsCommitmentProcessorBaseTest h.getByteString() } - lazy val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + lazy val initialTransferCounter: TransferCounter = TransferCounter.Genesis + protected def ts(i: CantonTimestamp): CantonTimestampSecond = + CantonTimestampSecond.ofEpochSecond(i.getEpochSecond) protected def ts(i: Int): CantonTimestampSecond = CantonTimestampSecond.ofEpochSecond(i.longValue) protected def toc(timestamp: Int, requestCounter: Int = 0): TimeOfChange = @@ -115,47 +123,55 @@ sealed trait AcsCommitmentProcessorBaseTest protected def mkChangeIdHash(index: Int) = ChangeIdHash(DefaultDamlValues.lfhash(index)) + private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 1) + protected def acsSetup( contracts: Map[LfContractId, NonEmpty[Seq[Lifespan]]] )(implicit ec: ExecutionContext, traceContext: TraceContext): Future[ActiveContractSnapshot] = { - val acs = new InMemoryActiveContractStore(testedProtocolVersion, loggerFactory) + val acs = + new 
InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory) contracts.toList .flatMap { case (cid, seq) => seq.forgetNE.map(lifespan => (cid, lifespan)) } .parTraverse_ { case (cid, lifespan) => for { _ <- { - if (lifespan.assignTransferCounter.forall(_ == TransferCounter.Genesis)) + if (lifespan.transferCounterAtActivation == TransferCounter.Genesis) acs - .markContractActive( + .markContractCreated( cid -> initialTransferCounter, - TimeOfChange(RequestCounter(0), lifespan.createdTs), + TimeOfChange(RequestCounter(0), lifespan.activatedTs), ) .value else acs .transferInContract( cid, - TimeOfChange(RequestCounter(0), lifespan.createdTs), + TimeOfChange(RequestCounter(0), lifespan.activatedTs), SourceDomainId(domainId), - lifespan.assignTransferCounter, + lifespan.transferCounterAtActivation, ) .value } - _ <- { - if (lifespan.unassignTransferCounter.isEmpty) + _ <- lifespan match { + case Lifespan.ArchiveOnDeactivate(_, deactivatedTs, _) => acs .archiveContract( cid, - TimeOfChange(RequestCounter(0), lifespan.archivedTs), + TimeOfChange(RequestCounter(0), deactivatedTs), ) .value - else + case Lifespan.TransferOutOnDeactivate( + _, + deactivatedTs, + _, + transferCounterAtTransferOut, + ) => acs .transferOutContract( cid, - TimeOfChange(RequestCounter(0), lifespan.archivedTs), + TimeOfChange(RequestCounter(0), lifespan.deactivatedTs), TargetDomainId(domainId), - lifespan.unassignTransferCounter, + transferCounterAtTransferOut, ) .value } @@ -167,10 +183,21 @@ sealed trait AcsCommitmentProcessorBaseTest protected def cryptoSetup( owner: ParticipantId, topology: Map[ParticipantId, Set[LfPartyId]], + dynamicDomainParametersWithValidity: List[ + DomainParameters.WithValidity[DynamicDomainParameters] + ] = List.empty, ): SyncCryptoClient[DomainSnapshotSyncCryptoApi] = { + val topologyWithPermissions = topology.fmap(_.map(p => (p, ParticipantPermission.Submission)).toMap) - TestingTopologyX() + + val testingTopology = dynamicDomainParametersWithValidity match { + // this way we get default values for an empty List + case Nil => TestingTopologyX() + case _ => TestingTopologyX(domainParameters = dynamicDomainParametersWithValidity) + } + + testingTopology .withReversedTopology(topologyWithPermissions) .build() .forOwnerAndDomain(owner) @@ -179,7 +206,7 @@ sealed trait AcsCommitmentProcessorBaseTest protected def changesAtToc( contractSetup: Map[ LfContractId, - (Set[LfPartyId], TimeOfChange, TimeOfChange, TransferCounterO, TransferCounterO), + (Set[LfPartyId], TimeOfChange, TimeOfChange, TransferCounter, TransferCounter), ] )(toc: TimeOfChange): (CantonTimestamp, RequestCounter, AcsChange) = { ( @@ -227,7 +254,7 @@ sealed trait AcsCommitmentProcessorBaseTest timeProofs: List[CantonTimestamp], contractSetup: Map[ LfContractId, - (Set[LfPartyId], TimeOfChange, TimeOfChange, TransferCounterO, TransferCounterO), + (Set[LfPartyId], TimeOfChange, TimeOfChange, TransferCounter, TransferCounter), ], topology: Map[ParticipantId, Set[LfPartyId]], optCommitmentStore: Option[AcsCommitmentStore] = None, @@ -235,18 +262,43 @@ sealed trait AcsCommitmentProcessorBaseTest SortedReconciliationIntervalsProvider ] = None, acsCommitmentsCatchUpModeEnabled: Boolean = false, + domainParametersUpdates: List[DomainParameters.WithValidity[DynamicDomainParameters]] = + List.empty, )(implicit ec: ExecutionContext): ( AcsCommitmentProcessor, AcsCommitmentStore, TestSequencerClientSend, List[(CantonTimestamp, RequestCounter, AcsChange)], ) = { - val domainCrypto = cryptoSetup(localId, topology) + + 
val acsCommitmentsCatchUpConfig = + if (acsCommitmentsCatchUpModeEnabled) + Some(AcsCommitmentsCatchUpConfig(PositiveInt.tryCreate(2), PositiveInt.tryCreate(1))) + else None + + val domainCrypto = cryptoSetup( + localId, + topology, + domainParametersUpdates.appended( + DomainParameters.WithValidity( + validFrom = CantonTimestamp.MinValue, + validUntil = domainParametersUpdates + .sortBy(_.validFrom) + .headOption + .fold(Some(CantonTimestamp.MaxValue))(param => Some(param.validFrom)), + parameter = defaultParameters.tryUpdate(acsCommitmentsCatchUpConfigParameter = + acsCommitmentsCatchUpConfig + ), + ) + ), + ) val sequencerClient = new TestSequencerClientSend val changeTimes = - (timeProofs.map(ts => TimeOfChange(RequestCounter(0), ts)) ++ contractSetup.values.toList + (timeProofs + .map(time => time.plusSeconds(1)) + .map(ts => TimeOfChange(RequestCounter(0), ts)) ++ contractSetup.values.toList .flatMap { case (_, creationTs, archivalTs, _, _) => List(creationTs, archivalTs) }).distinct.sorted @@ -258,10 +310,7 @@ sealed trait AcsCommitmentProcessorBaseTest constantSortedReconciliationIntervalsProvider(interval) } - val acsCommitmentsCatchUpConfig = - if (acsCommitmentsCatchUpModeEnabled) - Some(AcsCommitmentsCatchUpConfig(PositiveInt.tryCreate(2), PositiveInt.tryCreate(1))) - else None + val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = 1) val acsCommitmentProcessor = new AcsCommitmentProcessor( domainId, @@ -273,11 +322,10 @@ sealed trait AcsCommitmentProcessorBaseTest _ => (), ParticipantTestMetrics.pruning, testedProtocolVersion, - acsCommitmentsCatchUpConfig, DefaultProcessingTimeouts.testing .copy(storageMaxRetryInterval = NonNegativeDuration.tryFromDuration(1.millisecond)), futureSupervisor, - new InMemoryActiveContractStore(testedProtocolVersion, loggerFactory), + new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory), new InMemoryContractStore(loggerFactory), // no additional consistency checks; if enabled, one needs to populate the above ACS and contract stores // correctly, otherwise the test will fail @@ -289,10 +337,9 @@ sealed trait AcsCommitmentProcessorBaseTest protected def testSetup( timeProofs: List[CantonTimestamp], - // contractSetup: Map[LfContractId, (Set[LfPartyId], TimeOfChange, TimeOfChange)], contractSetup: Map[ LfContractId, - (Set[LfPartyId], TimeOfChange, TimeOfChange, TransferCounterO, TransferCounterO), + (Set[LfPartyId], TimeOfChange, TimeOfChange, TransferCounter, TransferCounter), ], topology: Map[ParticipantId, Set[LfPartyId]], optCommitmentStore: Option[AcsCommitmentStore] = None, @@ -322,8 +369,8 @@ sealed trait AcsCommitmentProcessorBaseTest Map[LfContractId, (Set[Ref.IdString.Party], NonEmpty[Seq[Lifespan]])], Map[CantonTimestampSecond, AcsChange], ) = { - val tc2 = initialTransferCounter.map(_ + 1) - val tc3 = initialTransferCounter.map(_ + 2) + val tc2 = initialTransferCounter + 1 + val tc3 = initialTransferCounter + 2 val contracts = Map( ( coid(0, 0), @@ -331,7 +378,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(4).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(4).forgetRefinement, + initialTransferCounter, + ): Lifespan, ), ), ), @@ -341,8 +392,19 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(4).forgetRefinement, initialTransferCounter, tc2), - 
Lifespan(ts(7).forgetRefinement, ts(8).forgetRefinement, tc2, None), + Lifespan.TransferOutOnDeactivate( + ts(2).forgetRefinement, + ts(4).forgetRefinement, + initialTransferCounter, + tc2, + ): Lifespan, + Lifespan + .TransferOutOnDeactivate( + ts(7).forgetRefinement, + ts(8).forgetRefinement, + tc2, + tc2, + ): Lifespan, ), ), ), @@ -352,8 +414,19 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob, carol), NonEmpty.mk( Seq, - Lifespan(ts(7).forgetRefinement, ts(8).forgetRefinement, initialTransferCounter, tc2), - Lifespan(ts(10).forgetRefinement, ts(12).forgetRefinement, tc2, tc3), + Lifespan.TransferOutOnDeactivate( + ts(7).forgetRefinement, + ts(8).forgetRefinement, + initialTransferCounter, + tc2, + ): Lifespan, + Lifespan + .TransferOutOnDeactivate( + ts(10).forgetRefinement, + ts(12).forgetRefinement, + tc2, + tc3, + ): Lifespan, ), ), ), @@ -363,7 +436,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob, carol), NonEmpty.mk( Seq, - Lifespan(ts(9).forgetRefinement, ts(9).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(9).forgetRefinement, + ts(9).forgetRefinement, + initialTransferCounter, + ): Lifespan, ), ), ), @@ -388,7 +465,11 @@ sealed trait AcsCommitmentProcessorBaseTest transferOuts = Map.empty[LfContractId, WithContractHash[TransferOutCommit]], transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) - val acs2 = AcsChange.fromCommitSet(cs2, Map.empty[LfContractId, TransferCounterO]) + val acs2 = AcsChange.tryFromCommitSet( + cs2, + Map.empty[LfContractId, TransferCounter], + Map.empty[LfContractId, TransferCounter], + ) val cs4 = CommitSet( creations = Map.empty[LfContractId, WithContractHash[CreationCommit]], @@ -410,9 +491,10 @@ sealed trait AcsCommitmentProcessorBaseTest ), transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) - val acs4 = AcsChange.fromCommitSet( + val acs4 = AcsChange.tryFromCommitSet( cs4, - Map[LfContractId, TransferCounterO](coid(0, 0) -> initialTransferCounter), + Map[LfContractId, TransferCounter](coid(0, 0) -> initialTransferCounter), + Map.empty[LfContractId, TransferCounter], ) val cs7 = CommitSet( @@ -436,7 +518,11 @@ sealed trait AcsCommitmentProcessorBaseTest ) ), ) - val acs7 = AcsChange.fromCommitSet(cs7, Map.empty[LfContractId, TransferCounterO]) + val acs7 = AcsChange.tryFromCommitSet( + cs7, + Map.empty[LfContractId, TransferCounter], + Map.empty[LfContractId, TransferCounter], + ) val cs8 = CommitSet( creations = Map.empty[LfContractId, WithContractHash[CreationCommit]], @@ -459,7 +545,11 @@ sealed trait AcsCommitmentProcessorBaseTest transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) val acs8 = - AcsChange.fromCommitSet(cs8, Map[LfContractId, TransferCounterO](coid(1, 0) -> tc2)) + AcsChange.tryFromCommitSet( + cs8, + Map[LfContractId, TransferCounter](coid(1, 0) -> tc2), + Map.empty[LfContractId, TransferCounter], + ) val cs9 = CommitSet( creations = Map[LfContractId, WithContractHash[CreationCommit]]( @@ -480,9 +570,10 @@ sealed trait AcsCommitmentProcessorBaseTest transferOuts = Map.empty[LfContractId, WithContractHash[TransferOutCommit]], transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) - val acs9 = AcsChange.fromCommitSet( + val acs9 = AcsChange.tryFromCommitSet( cs9, - Map[LfContractId, TransferCounterO](coid(3, 0) -> initialTransferCounter), + Map.empty[LfContractId, TransferCounter], + Map[LfContractId, TransferCounter](coid(3, 0) -> initialTransferCounter), ) val cs10 = 
CommitSet( @@ -499,7 +590,11 @@ sealed trait AcsCommitmentProcessorBaseTest ) ), ) - val acs10 = AcsChange.fromCommitSet(cs10, Map.empty[LfContractId, TransferCounterO]) + val acs10 = AcsChange.tryFromCommitSet( + cs10, + Map.empty[LfContractId, TransferCounter], + Map.empty[LfContractId, TransferCounter], + ) val cs12 = CommitSet( creations = Map.empty[LfContractId, WithContractHash[CreationCommit]], @@ -515,7 +610,11 @@ sealed trait AcsCommitmentProcessorBaseTest ), transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) - val acs12 = AcsChange.fromCommitSet(cs12, Map.empty[LfContractId, TransferCounterO]) + val acs12 = AcsChange.tryFromCommitSet( + cs12, + Map.empty[LfContractId, TransferCounter], + Map.empty[LfContractId, TransferCounter], + ) val acsChanges = Map( ts(2) -> acs2, @@ -544,7 +643,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(4).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(4).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -554,7 +657,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(6).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(6).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -564,7 +671,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob, carol), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(10).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(10).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -574,7 +685,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, bob, carol), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(4).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(4).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -584,7 +699,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, danna), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(14).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(14).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -594,7 +713,11 @@ sealed trait AcsCommitmentProcessorBaseTest Set(alice, ed), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(18).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(18).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -643,7 +766,11 @@ sealed trait AcsCommitmentProcessorBaseTest transferOuts = Map.empty[LfContractId, WithContractHash[TransferOutCommit]], transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) - val acs2 = AcsChange.fromCommitSet(cs2, Map.empty[LfContractId, TransferCounterO]) + val acs2 = AcsChange.tryFromCommitSet( + cs2, + Map.empty[LfContractId, TransferCounter], + Map.empty[LfContractId, TransferCounter], + ) val cs4 = CommitSet( creations = Map.empty[LfContractId, WithContractHash[CreationCommit]], @@ -662,20 +789,28 @@ sealed trait AcsCommitmentProcessorBaseTest transferOuts = Map.empty[LfContractId, WithContractHash[TransferOutCommit]], transferIns = Map.empty[LfContractId, WithContractHash[TransferInCommit]], ) - val acs4 = AcsChange.fromCommitSet(cs4, Map.empty[LfContractId, 
TransferCounterO]) + val acs4 = AcsChange.tryFromCommitSet( + cs4, + Map[LfContractId, TransferCounter]( + coid(0, 0) -> initialTransferCounter, + coid(3, 0) -> initialTransferCounter, + ), + Map.empty[LfContractId, TransferCounter], + ) val acsChanges = Map(ts(2) -> acs2, ts(4) -> acs4) (contracts, acsChanges) } - val testHash: LfHash = ExampleTransactionFactory.lfHash(0) + protected val testHash: LfHash = ExampleTransactionFactory.lfHash(0) protected def withTestHash[A]: A => WithContractHash[A] = WithContractHash[A](_, testHash) protected def rt(timestamp: Int, tieBreaker: Int): RecordTime = RecordTime(ts(timestamp).forgetRefinement, tieBreaker.toLong) - val coid = (txId, discriminator) => ExampleTransactionFactory.suffixedId(txId, discriminator) + protected val coid = (txId, discriminator) => + ExampleTransactionFactory.suffixedId(txId, discriminator) } class AcsCommitmentProcessorTest @@ -686,15 +821,13 @@ class AcsCommitmentProcessorTest // if we want to test whether commitment buffering works // Also assumes that all the contracts in the map have the same stakeholders private def stakeholderCommitment( - contracts: Map[LfContractId, TransferCounterO] + contracts: Map[LfContractId, TransferCounter] ): AcsCommitment.CommitmentType = { val h = LtHash16() contracts.keySet.foreach { cid => h.add( (testHash.bytes.toByteString concat cid.encodeDeterministically - concat contracts(cid).fold(ByteString.EMPTY)( - TransferCounter.encodeDeterministically - )).toByteArray + concat TransferCounter.encodeDeterministically(contracts(cid))).toByteArray ) } h.getByteString() @@ -741,7 +874,7 @@ class AcsCommitmentProcessorTest private def commitmentMsg( params: ( ParticipantId, - Map[LfContractId, TransferCounterO], + Map[LfContractId, TransferCounter], CantonTimestampSecond, CantonTimestampSecond, ) @@ -813,7 +946,7 @@ class AcsCommitmentProcessorTest private def addCommonContractId( rc: RunningCommitments, hash: LfHash, - transferCounter: TransferCounterO, + transferCounter: TransferCounter, ): (AcsCommitment.CommitmentType, AcsCommitment.CommitmentType) = { val commonContractId = coid(0, 0) rc.watermark shouldBe RecordTime.MinValue @@ -929,7 +1062,11 @@ class AcsCommitmentProcessorTest Set(alice, bob), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(4).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(4).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -939,7 +1076,11 @@ class AcsCommitmentProcessorTest Set(alice, bob), NonEmpty.mk( Seq, - Lifespan(ts(2).forgetRefinement, ts(5).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(2).forgetRefinement, + ts(5).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -949,7 +1090,11 @@ class AcsCommitmentProcessorTest Set(alice, bob, carol), NonEmpty.mk( Seq, - Lifespan(ts(7).forgetRefinement, ts(8).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(7).forgetRefinement, + ts(8).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -959,7 +1104,11 @@ class AcsCommitmentProcessorTest Set(alice, bob, carol), NonEmpty.mk( Seq, - Lifespan(ts(9).forgetRefinement, ts(9).forgetRefinement, initialTransferCounter, None), + Lifespan.ArchiveOnDeactivate( + ts(9).forgetRefinement, + ts(9).forgetRefinement, + initialTransferCounter, + ), ), ), ), @@ -969,11 +1118,10 @@ class AcsCommitmentProcessorTest Set(bob, carol), NonEmpty.mk( Seq, - Lifespan( + Lifespan.ArchiveOnDeactivate( ts(11).forgetRefinement, 
ts(13).forgetRefinement, initialTransferCounter, - None, ), ), ), @@ -1120,10 +1268,8 @@ class AcsCommitmentProcessorTest processor.processBatchInternal(ts.forgetRefinement, batch) } .onShutdown(fail()) - _ = changes.foreach { case (ts, tb, change) => - processor.publish(RecordTime(ts, tb.v), change) - } - _ <- processor.flush() + _ <- processChanges(processor, store, changes) + computed <- store.searchComputedBetween(CantonTimestamp.Epoch, timeProofs.lastOption.value) received <- store.searchReceivedBetween(CantonTimestamp.Epoch, timeProofs.lastOption.value) } yield { @@ -1715,7 +1861,7 @@ class AcsCommitmentProcessorTest val rc2 = new pruning.AcsCommitmentProcessor.RunningCommitments(RecordTime.MinValue, TrieMap.empty) val hash = ExampleTransactionFactory.lfHash(1) - val tc2 = initialTransferCounter.map(_ + 1) + val tc2 = initialTransferCounter + 1 val (activeCommitment1, deltaAddedCommitment1) = addCommonContractId(rc1, hash, initialTransferCounter) @@ -1734,9 +1880,9 @@ class AcsCommitmentProcessorTest val hash3 = ExampleTransactionFactory.lfHash(3) val cid4 = coid(0, 4) val hash4 = ExampleTransactionFactory.lfHash(4) - val tc1 = initialTransferCounter.map(_ + 1) - val tc2 = initialTransferCounter.map(_ + 2) - val tc3 = initialTransferCounter.map(_ + 3) + val tc1 = initialTransferCounter + 1 + val tc2 = initialTransferCounter + 2 + val tc3 = initialTransferCounter + 3 val cs = CommitSet( creations = Map[LfContractId, WithContractHash[CreationCommit]]( @@ -1771,26 +1917,30 @@ class AcsCommitmentProcessorTest ), ) - // Omitting "cid3 -> None" to test that the code considers the missing cid to have transfer counter None - val transferCounterOfArchival = - Map[LfContractId, TransferCounterO](cid1 -> None, cid4 -> tc3) + val transferCounterOfArchival = Map[LfContractId, TransferCounter](cid4 -> tc3) - val acs1 = AcsChange.fromCommitSet(cs, transferCounterOfArchival) + val transferCountersForArchivedTransient = AcsChange.transferCountersForArchivedTransient(cs) + val acs1 = + AcsChange.tryFromCommitSet( + cs, + transferCounterOfArchival, + transferCountersForArchivedTransient, + ) // cid1 is a transient creation with transfer counter initialTransferCounter and should not appear in the ACS change - AcsChange.transferCountersforArchivedCidInclTransient( - cid1, - cs, - transferCounterOfArchival, - ) shouldBe initialTransferCounter + transferCountersForArchivedTransient + .get(cid1) + .fold( + fail(s"$cid1 should be transient, but is not in $transferCountersForArchivedTransient") + )(_ shouldBe initialTransferCounter) acs1.activations.get(cid1) shouldBe None acs1.deactivations.get(cid1) shouldBe None // cid3 is a transient transfer-in and should not appear in the ACS change - AcsChange.transferCountersforArchivedCidInclTransient( - cid3, - cs, - transferCounterOfArchival, - ) shouldBe tc1 + transferCountersForArchivedTransient + .get(cid3) + .fold( + fail(s"$cid3 should be transient, but is not in $transferCountersForArchivedTransient") + )(_ shouldBe tc1) acs1.activations.get(cid3) shouldBe None acs1.deactivations.get(cid3) shouldBe None // transfer-out cid2 is a deactivation with transfer counter tc2 @@ -1879,12 +2029,35 @@ class AcsCommitmentProcessorTest "use catch-up logic correctly:" must { - def checkCatchUpModeCfgCorrect(processor: pruning.AcsCommitmentProcessor): Assertion = { - processor.acsCommitmentsCatchUpConfig match { - case Some(cfg) => - assert(cfg.nrIntervalsToTriggerCatchUp == PositiveInt.tryCreate(1)) - assert(cfg.catchUpIntervalSkip == PositiveInt.tryCreate(2)) - case None 
=> fail("catch up mode needs to be enabled") + def checkCatchUpModeCfgCorrect( + processor: pruning.AcsCommitmentProcessor, + cantonTimestamp: CantonTimestamp, + nrIntervalsToTriggerCatchUp: PositiveInt = PositiveInt.tryCreate(1), + catchUpIntervalSkip: PositiveInt = PositiveInt.tryCreate(2), + ): Future[Assertion] = { + for { + config <- processor.catchUpConfig(cantonTimestamp) + } yield { + config match { + case Some(cfg) => + assert(cfg.nrIntervalsToTriggerCatchUp == nrIntervalsToTriggerCatchUp) + assert(cfg.catchUpIntervalSkip == catchUpIntervalSkip) + case None => fail("catch up mode needs to be enabled") + } + } + } + + def checkCatchUpModeCfgDisabled( + processor: pruning.AcsCommitmentProcessor, + cantonTimestamp: CantonTimestamp, + ): Future[Assertion] = { + for { + config <- processor.catchUpConfig(cantonTimestamp) + } yield { + config match { + case Some(cfg) => fail(s"Canton config is defined ($cfg) at $cantonTimestamp") + case None => succeed + } } } @@ -1920,8 +2093,6 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, ) - checkCatchUpModeCfgCorrect(processor) - val remoteCommitments = List( (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(0), ts(5)), (remoteId2, Map((coid(0, 1), initialTransferCounter)), ts(10), ts(15)), @@ -1936,6 +2107,7 @@ class AcsCommitmentProcessorTest ) for { + _ <- checkCatchUpModeCfgCorrect(processor, timeProofs.head) remote <- remoteCommitments.parTraverse(commitmentMsg) delivered = remote.map(cmt => ( @@ -1950,15 +2122,14 @@ class AcsCommitmentProcessorTest processor.processBatchInternal(ts.forgetRefinement, batch) } .onShutdown(fail()) - _ = changes.foreach { case (ts, tb, change) => - processor.publish(RecordTime(ts, tb.v), change) - } - _ <- processor.flush() + _ <- processChanges(processor, store, changes) + outstanding <- store.noOutstandingCommitments(timeProofs.lastOption.value) - computed <- store.searchComputedBetween( + computedAll <- store.searchComputedBetween( CantonTimestamp.Epoch, timeProofs.lastOption.value, ) + computed = computedAll.filter(_._2 != localId) received <- store.searchReceivedBetween( CantonTimestamp.Epoch, timeProofs.lastOption.value, @@ -1968,7 +2139,8 @@ class AcsCommitmentProcessorTest // the only ticks with non-empty commitments are at 20 and 30, and they match the remote ones, // therefore there are 2 sends of commitments sequencerClient.requests.size shouldBe 2 - assert(computed.size === 4) + // compute commitments only for interval ends 20 and 30 + assert(computed.size === 2) assert(received.size === 5) // all local commitments were matched and can be pruned assert(outstanding == Some(toc(55).timestamp)) @@ -2007,8 +2179,6 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, ) - checkCatchUpModeCfgCorrect(processor) - val remoteCommitments = List( (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(0), ts(5)), (remoteId2, Map((coid(0, 1), initialTransferCounter)), ts(10), ts(15)), @@ -2022,6 +2192,7 @@ class AcsCommitmentProcessorTest ) for { + _ <- checkCatchUpModeCfgCorrect(processor, timeProofs.head) remote <- remoteCommitments.parTraverse(commitmentMsg) delivered = remote.map(cmt => ( @@ -2031,10 +2202,8 @@ class AcsCommitmentProcessorTest ) // First ask for the local commitments to be processed, and then receive the remote ones, // because the remote participants are catching up - _ = changes.foreach { case (ts, tb, change) => - processor.publish(RecordTime(ts, tb.v), change) - } - _ <- processor.flush() + _ <- processChanges(processor, store, 
changes) + _ <- delivered .parTraverse_ { case (ts, batch) => processor.processBatchInternal(ts.forgetRefinement, batch) @@ -2093,8 +2262,6 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, ) - checkCatchUpModeCfgCorrect(processor) - val remoteCommitments = List( (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(0), ts(5)), (remoteId2, Map((coid(0, 1), initialTransferCounter)), ts(10), ts(15)), @@ -2102,8 +2269,8 @@ class AcsCommitmentProcessorTest ( remoteId2, Map( - (coid(1, 1), initialTransferCounter.map(_ + 1)), - (coid(2, 1), initialTransferCounter.map(_ + 2)), + (coid(1, 1), initialTransferCounter + 1), + (coid(2, 1), initialTransferCounter + 2), ), ts(15), ts(20), @@ -2113,6 +2280,7 @@ class AcsCommitmentProcessorTest ) for { + _ <- checkCatchUpModeCfgCorrect(processor, timeProofs.head) remote <- remoteCommitments.parTraverse(commitmentMsg) delivered = remote.map(cmt => ( @@ -2149,10 +2317,11 @@ class AcsCommitmentProcessorTest ) outstanding <- store.noOutstandingCommitments(toc(30).timestamp) - computed <- store.searchComputedBetween( + computedAll <- store.searchComputedBetween( CantonTimestamp.Epoch, timeProofs.lastOption.value, ) + computed = computedAll.filter(_._2 != localId) received <- store.searchReceivedBetween( CantonTimestamp.Epoch, timeProofs.lastOption.value, @@ -2165,12 +2334,403 @@ class AcsCommitmentProcessorTest // which means we send the fine-grained commitment 10-15 // therefore, there should be 3 async sends in total sequencerClient.requests.size shouldBe 3 - assert(computed.size === 4) + // compute commitments for interval ends 10, 20, and one for the mismatch at interval end 15 + assert(computed.size === 3) assert(received.size === 5) // cannot prune past the mismatch assert(outstanding == Some(toc(30).timestamp)) } } + "dynamically change, disable & re-enable catch-up config during a catch-up" in { + val reconciliationInterval = 5 + val testSequences = + List( + // we split them up by large amounts to avoid potential overlaps + (1L to 5) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList, + (101L to 105) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList, + (201L to 205) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList, + ) + + val contractSetup = Map( + // contract ID to stakeholders, creation and archival time + ( + coid(0, 0), + (Set(alice, bob), toc(1), toc(20000), initialTransferCounter, initialTransferCounter), + ) + ) + + val topology = Map( + localId -> Set(alice), + remoteId1 -> Set(bob), + ) + + val midConfig = + new AcsCommitmentsCatchUpConfig(PositiveInt.tryCreate(1), PositiveInt.tryCreate(2)) + val changedConfigWithValidity = DomainParameters.WithValidity( + validFrom = testSequences.last.head, + validUntil = None, + parameter = + defaultParameters.tryUpdate(acsCommitmentsCatchUpConfigParameter = Some(midConfig)), + ) + + val disabledConfigWithValidity = DomainParameters.WithValidity( + validFrom = testSequences.apply(1).head, + validUntil = Some(testSequences.apply(1).last), + parameter = defaultParameters, + ) + + val (processor, store, sequencerClient, changes) = + testSetupDontPublish( + testSequences.flatten, + contractSetup, + topology, + acsCommitmentsCatchUpModeEnabled = true, + domainParametersUpdates = List(disabledConfigWithValidity, changedConfigWithValidity), + ) + + for { + _ <- checkCatchUpModeCfgCorrect(processor, testSequences.head.head) + _ <- checkCatchUpModeCfgDisabled(processor, 
testSequences.apply(1).last) + _ <- checkCatchUpModeCfgCorrect( + processor, + testSequences.last.last, + nrIntervalsToTriggerCatchUp = midConfig.nrIntervalsToTriggerCatchUp, + catchUpIntervalSkip = midConfig.catchUpIntervalSkip, + ) + + // we apply any changes (contract deployment) that happens before our windows + _ = changes + .filter(a => a._1 <= testSequences.head.head) + .foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + } + _ <- processor.flush() + _ <- testSequence( + testSequences.head, + processor, + changes, + store, + reconciliationInterval, + expectDegradation = true, + ) + // catchup is enabled so we send only 3 commitments + _ = sequencerClient.requests.size shouldBe 3 + _ <- testSequence( + testSequences.apply(1), + processor, + changes, + store, + reconciliationInterval, + ) + // catchup is disabled so we send all 5 commitments (plus 3 previous) + _ = sequencerClient.requests.size shouldBe (3 + 5) + _ <- testSequence( + testSequences.last, + processor, + changes, + store, + reconciliationInterval, + expectDegradation = true, + ) + // catchup is re-enabled so we send only 3 commitments (plus 5 & 3 previous) + _ = sequencerClient.requests.size shouldBe (3 + 5 + 3) + } yield { + succeed + } + } + + "disable catch-up config during catch-up mode" in { + val reconciliationInterval = 5 + val testSequences = + (1L to 10) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList + val changeConfigTimestamp = CantonTimestamp.ofEpochSecond(36L) + val contractSetup = Map( + // contract ID to stakeholders, creation and archival time + ( + coid(0, 0), + (Set(alice, bob), toc(1), toc(20000), initialTransferCounter, initialTransferCounter), + ) + ) + + val topology = Map( + localId -> Set(alice), + remoteId1 -> Set(bob), + ) + + val startConfig = + new AcsCommitmentsCatchUpConfig(PositiveInt.tryCreate(3), PositiveInt.tryCreate(1)) + val startConfigWithValidity = DomainParameters.WithValidity( + validFrom = testSequences.head.addMicros(-1), + validUntil = Some(changeConfigTimestamp), + parameter = + defaultParameters.tryUpdate(acsCommitmentsCatchUpConfigParameter = Some(startConfig)), + ) + + val disabledConfigWithValidity = DomainParameters.WithValidity( + validFrom = changeConfigTimestamp, + validUntil = None, + parameter = defaultParameters, + ) + val (processor, store, sequencerClient, changes) = + testSetupDontPublish( + testSequences, + contractSetup, + topology, + acsCommitmentsCatchUpModeEnabled = true, + domainParametersUpdates = List(startConfigWithValidity, disabledConfigWithValidity), + ) + + for { + _ <- checkCatchUpModeCfgCorrect( + processor, + testSequences.head, + startConfig.nrIntervalsToTriggerCatchUp, + startConfig.catchUpIntervalSkip, + ) + _ <- checkCatchUpModeCfgDisabled(processor, testSequences.last) + + // we apply any changes (contract deployment) that happens before our windows + _ = changes + .filter(a => a._1 <= testSequences.head) + .foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + } + _ <- processor.flush() + _ <- testSequence( + testSequences, + processor, + changes, + store, + reconciliationInterval, + ) + // here we get the times: [5,10,15,20,25,30,35,40,45,50] + // we disable the config at 36. 
+ // expected send timestamps are: [5,15,30,45,50,55] + _ = sequencerClient.requests.size shouldBe 6 + } yield { + succeed + } + } + + "change catch-up config during catch-up mode" in { + val reconciliationInterval = 5 + val testSequences = + (1L to 11) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList + val changeConfigTimestamp = CantonTimestamp.ofEpochSecond(36L) + val contractSetup = Map( + // contract ID to stakeholders, creation and archival time + ( + coid(0, 0), + (Set(alice, bob), toc(1), toc(20000), initialTransferCounter, initialTransferCounter), + ) + ) + + val topology = Map( + localId -> Set(alice), + remoteId1 -> Set(bob), + ) + + val startConfig = + new AcsCommitmentsCatchUpConfig(PositiveInt.tryCreate(3), PositiveInt.tryCreate(1)) + val startConfigWithValidity = DomainParameters.WithValidity( + validFrom = testSequences.head.addMicros(-1), + validUntil = Some(changeConfigTimestamp), + parameter = + defaultParameters.tryUpdate(acsCommitmentsCatchUpConfigParameter = Some(startConfig)), + ) + + val changeConfig = + new AcsCommitmentsCatchUpConfig(PositiveInt.tryCreate(2), PositiveInt.tryCreate(1)) + val changeConfigWithValidity = DomainParameters.WithValidity( + validFrom = changeConfigTimestamp, + validUntil = None, + parameter = + defaultParameters.tryUpdate(acsCommitmentsCatchUpConfigParameter = Some(changeConfig)), + ) + val (processor, store, sequencerClient, changes) = + testSetupDontPublish( + testSequences, + contractSetup, + topology, + acsCommitmentsCatchUpModeEnabled = true, + domainParametersUpdates = List(startConfigWithValidity, changeConfigWithValidity), + ) + + for { + _ <- checkCatchUpModeCfgCorrect( + processor, + testSequences.head, + startConfig.nrIntervalsToTriggerCatchUp, + startConfig.catchUpIntervalSkip, + ) + _ <- checkCatchUpModeCfgCorrect( + processor, + testSequences.last, + changeConfig.nrIntervalsToTriggerCatchUp, + changeConfig.catchUpIntervalSkip, + ) + + // we apply any changes (contract deployment) that happens before our windows + _ = changes + .filter(a => a._1 <= testSequences.head) + .foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + } + _ <- processor.flush() + _ <- testSequence( + testSequences, + processor, + changes, + store, + reconciliationInterval, + expectDegradation = true, + ) + // here we get the times: [5,10,15,20,25,30,35,40,45,50,55] + // we change the config at 36. 
+ // expected send timestamps are: [5,15,30,50] + _ = sequencerClient.requests.size shouldBe 4 + } yield { + succeed + } + } + "should mark as unhealthy when not caught up" in { + val reconciliationInterval = 5 + val testSequences = + (1L to 10) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList + + val timeProofs = + (1L to 5) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList + + val contractSetup = Map( + // contract ID to stakeholders, creation and archival time + ( + coid(0, 0), + (Set(alice, bob), toc(1), toc(20000), initialTransferCounter, initialTransferCounter), + ) + ) + + val topology = Map( + localId -> Set(alice), + remoteId1 -> Set(bob), + ) + + val (processor, store, sequencerClient, changes) = + testSetupDontPublish( + timeProofs, + contractSetup, + topology, + acsCommitmentsCatchUpModeEnabled = true, + ) + + for { + // we apply any changes (contract deployment) that happens before our windows + _ <- Future.successful( + changes + .filter(a => a._1 < testSequences.head) + .foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + + } + ) + _ <- processor.flush() + + _ <- testSequence( + testSequences, + processor, + changes, + store, + reconciliationInterval, + expectDegradation = true, + ) + _ = assert(processor.healthComponent.isDegrading) + } yield { + succeed + } + } + + def testSequence( + sequence: List[CantonTimestamp], + processor: AcsCommitmentProcessor, + changes: List[(CantonTimestamp, RequestCounter, AcsChange)], + store: AcsCommitmentStore, + reconciliationInterval: Int, + expectDegradation: Boolean = false, + ): Future[Assertion] = { + val remoteCommitments = sequence + .map(i => + ( + remoteId1, + Map((coid(0, 0), initialTransferCounter)), + ts(i), + ts(i.plusSeconds(reconciliationInterval.toLong)), + ) + ) + for { + remote <- remoteCommitments.parTraverse(commitmentMsg) + delivered = remote.map(cmt => + ( + cmt.message.period.toInclusive, + List(OpenEnvelope(cmt, Recipients.cc(localId))(testedProtocolVersion)), + ) + ) + endOfRemoteCommitsPeriod = sequence.last.plusSeconds( + reconciliationInterval.toLong + ) + + changesApplied = changes + .filter(a => a._1 >= sequence.head && a._1 <= endOfRemoteCommitsPeriod) + + // First ask for the remote commitments to be processed, and then compute locally + _ <- delivered + .parTraverse_ { case (ts, batch) => + processor.processBatchInternal(ts.forgetRefinement, batch) + } + .onShutdown(fail()) + _ <- processChanges( + processor, + store, + changesApplied, + ) + received <- store.searchReceivedBetween( + sequence.head, + endOfRemoteCommitsPeriod, + ) + computed <- store.searchComputedBetween( + sequence.head, + endOfRemoteCommitsPeriod, + ) + } yield { + if (expectDegradation) + assert(processor.healthComponent.isDegrading) + else { + assert(processor.healthComponent.isOk) + } + if (changesApplied.last._1 >= sequence.last) + assert(computed.size === sequence.length) + assert(received.size === sequence.length) + } + } "prune correctly on mismatch during catch-up" in { @@ -2205,8 +2765,6 @@ class AcsCommitmentProcessorTest acsCommitmentsCatchUpModeEnabled = true, ) - checkCatchUpModeCfgCorrect(processor) - val remoteCommitments = List( (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(0), ts(5)), (remoteId2, Map((coid(0, 1), initialTransferCounter)), ts(10), ts(15)), @@ -2214,8 +2772,8 @@ class AcsCommitmentProcessorTest ( remoteId2, Map( - (coid(1, 1), initialTransferCounter.map(_ + 1)), - (coid(2, 1), 
initialTransferCounter.map(_ + 2)), + (coid(1, 1), initialTransferCounter + 1), + (coid(2, 1), initialTransferCounter + 2), ), ts(15), ts(20), @@ -2226,6 +2784,7 @@ class AcsCommitmentProcessorTest ) for { + _ <- checkCatchUpModeCfgCorrect(processor, timeProofs.head) remote <- remoteCommitments.parTraverse(commitmentMsg) delivered = remote.map(cmt => ( @@ -2262,10 +2821,11 @@ class AcsCommitmentProcessorTest ) outstanding <- store.noOutstandingCommitments(toc(30).timestamp) - computed <- store.searchComputedBetween( + computedAll <- store.searchComputedBetween( CantonTimestamp.Epoch, timeProofs.lastOption.value, ) + computed = computedAll.filter(_._2 != localId) received <- store.searchReceivedBetween( CantonTimestamp.Epoch, timeProofs.lastOption.value, @@ -2280,14 +2840,349 @@ class AcsCommitmentProcessorTest // we only observed the interval 20-30, for which we already sent a commitment. // therefore, there should be 3 async sends in total sequencerClient.requests.size shouldBe 3 - assert(computed.size === 4) + // compute commitments for interval ends 10, 20, and one for the mismatch at interval end 15 + assert(computed.size === 3) assert(received.size === 5) // cannot prune past the mismatch 25-30, because there are no commitments that match past this point assert(outstanding == Some(toc(25).timestamp)) } } - } + "not report errors about skipped commitments due to catch-up mode" in { + + val reconciliationInterval = 5 + val timeProofs = + (1L to 7) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList + val contractSetup = Map( + // contract ID to stakeholders, creation and archival time + ( + coid(0, 0), + ( + Set(alice, bob, carol), + toc(1), + toc(36), + initialTransferCounter, + initialTransferCounter, + ), + ) + ) + + val topology = Map( + localId -> Set(alice), + remoteId1 -> Set(bob), + remoteId2 -> Set(carol), + ) + + val (processor, store, sequencerClient, changes) = + testSetupDontPublish( + timeProofs, + contractSetup, + topology, + acsCommitmentsCatchUpModeEnabled = true, + ) + + val remoteCommitmentsFast = List( + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(0), ts(5)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(5), ts(10)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(10), ts(15)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(15), ts(20)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(20), ts(25)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(25), ts(30)), + ) + + val remoteCommitmentsNormal = List( + (remoteId2, Map((coid(0, 0), initialTransferCounter)), ts(10), ts(15)), + (remoteId2, Map((coid(0, 0), initialTransferCounter)), ts(20), ts(25)), + ) + + for { + _ <- checkCatchUpModeCfgCorrect(processor, timeProofs.head) + remoteFast <- remoteCommitmentsFast.parTraverse(commitmentMsg) + deliveredFast = remoteFast.map(cmt => + ( + cmt.message.period.toInclusive.plusSeconds(1), + List(OpenEnvelope(cmt, Recipients.cc(localId))(testedProtocolVersion)), + ) + ) + // First ask for the remote commitments from remoteId1 to be processed, + // which are up to timestamp 30 + // This causes the local participant to enter catch-up mode + _ <- deliveredFast + .parTraverse_ { case (ts, batch) => + processor.processBatchInternal(ts.forgetRefinement, batch) + } + .onShutdown(fail()) + + _ = loggerFactory.assertLogs( + { + changes.foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + } + processor.flush() + }, + entry => { + entry.message 
should include(AcsCommitmentDegradationWithIneffectiveConfig.id) + }, + ) + + // Receive and process remote commitments from remoteId2, for skipped timestamps 10-15 and 20-25 + // The local participant did not compute those commitments because of the catch-up mode, but should + // not error (in particular, not report NO_SHARED_CONTRACTS) + remoteNormal <- remoteCommitmentsNormal.parTraverse(commitmentMsg) + deliveredNormal = remoteNormal.map(cmt => + ( + cmt.message.period.toInclusive.plusSeconds(1), + List(OpenEnvelope(cmt, Recipients.cc(localId))(testedProtocolVersion)), + ) + ) + + _ <- deliveredNormal + .parTraverse_ { case (ts, batch) => + processor.processBatchInternal(ts.forgetRefinement, batch) + } + .onShutdown(fail()) + _ <- processor.flush() + + outstanding <- store.noOutstandingCommitments(toc(30).timestamp) + computed <- store.searchComputedBetween( + CantonTimestamp.Epoch, + timeProofs.lastOption.value, + ) + computedCatchUp = computed.filter(_._2 == localId) + received <- store.searchReceivedBetween( + CantonTimestamp.Epoch, + timeProofs.lastOption.value, + ) + } yield { + // there are four sends, at the end of each coarse-grained interval 10, 20, 30, and normal 35 + sequencerClient.requests.size shouldBe 4 + // compute commitments for interval ends (10, 20, 30 and 35) x 2, and 3 empty ones for 5,15,25 as catch-up + assert(computed.size === 11) + assert(computedCatchUp.forall(_._3 == emptyCommitment) && computedCatchUp.size == 3) + assert(received.size === 8) + // cannot prune past the mismatch + assert(outstanding == Some(toc(25).timestamp)) + } + } + + "perform match for fine-grained commitments in case of mismatch at catch-up boundary" in { + + val reconciliationInterval = 5 + val timeProofs = + (1L to 7) + .map(i => i * reconciliationInterval) + .map(CantonTimestamp.ofEpochSecond) + .toList + val contractSetup = Map( + // contract ID to stakeholders, creation and archival time + ( + coid(0, 0), + ( + Set(alice, bob, carol), + toc(1), + toc(36), + initialTransferCounter, + initialTransferCounter, + ), + ) + ) + + val topology = Map( + localId -> Set(alice), + remoteId1 -> Set(bob), + remoteId2 -> Set(carol), + ) + + val (processor, store, sequencerClient, changes) = + testSetupDontPublish( + timeProofs, + contractSetup, + topology, + acsCommitmentsCatchUpModeEnabled = true, + ) + + val remoteCommitmentsFast = List( + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(0), ts(5)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(5), ts(10)), + // coid (0,1) is not shared: the mismatch appears here, but is skipped initially during catch-up + // this commitment is buffered and checked later + ( + remoteId1, + Map((coid(0, 0), initialTransferCounter), (coid(0, 1), initialTransferCounter)), + ts(10), + ts(15), + ), + // coid (0,1) is not shared, should cause a mismatch at catch-up boundary and fine-grained sending + ( + remoteId1, + Map((coid(0, 0), initialTransferCounter), (coid(0, 1), initialTransferCounter)), + ts(15), + ts(20), + ), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(20), ts(25)), + (remoteId1, Map((coid(0, 0), initialTransferCounter)), ts(25), ts(30)), + ) + + val remoteCommitmentsNormal = List( + (remoteId2, Map((coid(0, 0), initialTransferCounter)), ts(15), ts(20)), + // coid (0,2) is not shared, but does not cause a mismatch because we hadn't computed fine-grained + // commitments for remoteId2 + ( + remoteId2, + Map((coid(0, 0), initialTransferCounter), (coid(0, 2), initialTransferCounter)), + ts(10), + ts(15), + ), + 
(remoteId2, Map((coid(0, 0), initialTransferCounter)), ts(20), ts(25)), + ) + + loggerFactory.assertLoggedWarningsAndErrorsSeq( + { + for { + _ <- checkCatchUpModeCfgCorrect(processor, timeProofs.head) + remoteFast <- remoteCommitmentsFast.parTraverse(commitmentMsg) + deliveredFast = remoteFast.map(cmt => + ( + cmt.message.period.toInclusive.plusSeconds(1), + List(OpenEnvelope(cmt, Recipients.cc(localId))(testedProtocolVersion)), + ) + ) + // First ask for the remote commitments from remoteId1 to be processed, + // which are up to timestamp 30 + // This causes the local participant to enter catch-up mode, and observe mismatch for timestamp 20, + // and fine-grained compute and send commitment 10-15 + _ <- deliveredFast + .parTraverse_ { case (ts, batch) => + processor.processBatchInternal(ts.forgetRefinement, batch) + } + .onShutdown(fail()) + + _ = changes.foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + } + _ <- processor.flush() + + // Receive and process remote commitments from remoteId2, for skipped timestamps 10-15 and 20-25 + // The local participant did not compute commitment 20-25 because of the catch-up mode, but should + // not error (in particular, not report NO_SHARED_CONTRACTS) + // The local participant computed commitment 10-15 because of the mismatch at 20, and should issue + // a mismatch for that period + remoteNormal <- remoteCommitmentsNormal.parTraverse(commitmentMsg) + deliveredNormal = remoteNormal.map(cmt => + ( + cmt.message.period.toInclusive.plusSeconds(1), + List(OpenEnvelope(cmt, Recipients.cc(localId))(testedProtocolVersion)), + ) + ) + + _ <- deliveredNormal + .parTraverse_ { case (ts, batch) => + processor.processBatchInternal(ts.forgetRefinement, batch) + } + .onShutdown(fail()) + _ <- processor.flush() + + outstanding <- store.noOutstandingCommitments(toc(30).timestamp) + computed <- store.searchComputedBetween( + CantonTimestamp.Epoch, + timeProofs.lastOption.value, + ) + computedCatchUp = computed.filter(_._2 == localId) + received <- store.searchReceivedBetween( + CantonTimestamp.Epoch, + timeProofs.lastOption.value, + ) + } yield { + // there are five sends, at the end of each coarse-grained interval 10, 15, 20, 30, and normal 35 + sequencerClient.requests.size shouldBe 5 + // compute commitments for interval ends (10, 15, 20, 30 and 35) x 2, and 3 empty ones for 5,15,25 as catch-up + assert(computed.size === 13) + assert(computedCatchUp.forall(_._3 == emptyCommitment) && computedCatchUp.size == 3) + assert(received.size === 9) + // cannot prune past the mismatch + assert(outstanding == Some(toc(25).timestamp)) + } + }, + LogEntry.assertLogSeq( + Seq( + ( + _.shouldBeCantonErrorCode( + AcsCommitmentProcessor.Errors.DegradationError.AcsCommitmentDegradation + ), + "", + ), + ( + _.shouldBeCantonError( + AcsCommitmentProcessor.Errors.MismatchError.CommitmentsMismatch, + _ => succeed, + _("remote") should (include(s"sender = $remoteId1") and include( + "period = CommitmentPeriod(fromExclusive = 1970-01-01T00:00:15Z, toInclusive = 1970-01-01T00:00:20Z)" + )), + ), + s"mismatch at interval 15-20 with ${remoteId1}", + ), + ( + _.shouldBeCantonError( + AcsCommitmentProcessor.Errors.MismatchError.CommitmentsMismatch, + _ => succeed, + _("remote") should (include(s"sender = $remoteId1") and include( + "period = CommitmentPeriod(fromExclusive = 1970-01-01T00:00:10Z, toInclusive = 1970-01-01T00:00:15Z)" + )), + ), + s"mismatch at interval 10-15 with buffered commitment from ${remoteId1}", + ), + ( + _.shouldBeCantonError( + 
AcsCommitmentProcessor.Errors.MismatchError.CommitmentsMismatch, + _ => succeed, + _("remote") should (include(s"sender = $remoteId2") and include( + "period = CommitmentPeriod(fromExclusive = 1970-01-01T00:00:10Z, toInclusive = 1970-01-01T00:00:15Z)" + )), + ), + s"mismatch at interval 10-15 with incoming commitment from ${remoteId2}", + ), + ) + ), + ) + } + } + def processChanges( + processor: AcsCommitmentProcessor, + store: AcsCommitmentStore, + changes: List[(CantonTimestamp, RequestCounter, AcsChange)], + ): Future[Unit] = { + lazy val fut = { + changes.foreach { case (ts, tb, change) => + processor.publish(RecordTime(ts, tb.v), change) + } + processor.flush() + } + for { + config <- processor.catchUpConfig(changes.head._1) + remote <- store.searchReceivedBetween(changes.head._1, changes.last._1) + _ <- config match { + case _ if remote.isEmpty => fut + case None => fut + case Some(cfg) if cfg.catchUpIntervalSkip.value == 1 => + loggerFactory.assertLogs( + fut, + entry => { + entry.message should include(AcsCommitmentDegradationWithIneffectiveConfig.id) + }, + ) + case _ => + loggerFactory.assertLogs( + fut, + entry => { + entry.message should include(AcsCommitmentDegradation.id) + }, + ) + } + } yield () + } "caching commitments" should { "caches and computes correctly" in { @@ -2552,12 +3447,26 @@ class AcsCommitmentProcessorTest } } -final case class Lifespan( - createdTs: CantonTimestamp, - archivedTs: CantonTimestamp, - assignTransferCounter: TransferCounterO, - unassignTransferCounter: TransferCounterO, -) +sealed trait Lifespan { + def activatedTs: CantonTimestamp + def deactivatedTs: CantonTimestamp + def transferCounterAtActivation: TransferCounter +} + +object Lifespan { + final case class ArchiveOnDeactivate( + activatedTs: CantonTimestamp, + deactivatedTs: CantonTimestamp, + transferCounterAtActivation: TransferCounter, + ) extends Lifespan + + final case class TransferOutOnDeactivate( + activatedTs: CantonTimestamp, + deactivatedTs: CantonTimestamp, + transferCounterAtActivation: TransferCounter, + transferCounterAtTransferOut: TransferCounter, + ) extends Lifespan +} class AcsCommitmentProcessorSyncTest extends AnyWordSpec diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ActiveContractStoreTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ActiveContractStoreTest.scala index e5bb168e5..e823c71a1 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ActiveContractStoreTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ActiveContractStoreTest.scala @@ -11,6 +11,12 @@ import com.digitalasset.canton.config.CantonRequireTypes.String300 import com.digitalasset.canton.data.CantonTimestamp import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.participant.store.ActiveContractSnapshot.ActiveContractIdsChange +import com.digitalasset.canton.participant.store.ActiveContractStore.ActivenessChangeDetail.{ + Archive, + Create, + TransferIn, + TransferOut, +} import com.digitalasset.canton.participant.store.ActiveContractStore.* import com.digitalasset.canton.participant.util.TimeOfChange import com.digitalasset.canton.protocol.ContractIdSyntax.* @@ -29,14 +35,8 @@ import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} import com.digitalasset.canton.store.PrunableByTimeTest import com.digitalasset.canton.topology.{DomainId, UniqueIdentifier} import 
com.digitalasset.canton.util.FutureInstances.* -import com.digitalasset.canton.util.{Checked, CheckedT} -import com.digitalasset.canton.{ - BaseTest, - LfPackageId, - RequestCounter, - TransferCounter, - TransferCounterO, -} +import com.digitalasset.canton.util.{Checked, CheckedT, MonadUtil} +import com.digitalasset.canton.{BaseTest, LfPackageId, RequestCounter, TransferCounter} import org.scalatest.Assertion import org.scalatest.wordspec.AsyncWordSpecLike @@ -53,14 +53,13 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { lazy val acsDomainStr: String300 = String300.tryCreate("active-contract-store::default") lazy val acsDomainId: DomainId = DomainId.tryFromString(acsDomainStr.unwrap) - lazy val initialTransferCounter: TransferCounterO = - Some(TransferCounter.Genesis) + lazy val initialTransferCounter: TransferCounter = TransferCounter.Genesis - lazy val tc1: TransferCounterO = initialTransferCounter.map(_ + 1) - lazy val tc2: TransferCounterO = initialTransferCounter.map(_ + 2) - lazy val tc3: TransferCounterO = initialTransferCounter.map(_ + 3) - lazy val tc4: TransferCounterO = initialTransferCounter.map(_ + 4) - lazy val tc5: TransferCounterO = initialTransferCounter.map(_ + 5) + lazy val tc1: TransferCounter = initialTransferCounter + 1 + lazy val tc2: TransferCounter = initialTransferCounter + 2 + lazy val tc3: TransferCounter = initialTransferCounter + 3 + lazy val tc4: TransferCounter = initialTransferCounter + 4 + lazy val tc5: TransferCounter = initialTransferCounter + 5 lazy val active = Active(initialTransferCounter) @@ -75,13 +74,35 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val coid00 = ExampleTransactionFactory.suffixedId(0, 0) val coid01 = ExampleTransactionFactory.suffixedId(0, 1) + val coid02 = ExampleTransactionFactory.suffixedId(0, 2) val coid10 = ExampleTransactionFactory.suffixedId(1, 0) val coid11 = ExampleTransactionFactory.suffixedId(1, 1) val thousandOneContracts = (1 to 1001).map(ExampleTransactionFactory.suffixedId(0, _)).toSeq val rc = RequestCounter(0) + val rc2 = rc + 1 + val rc3 = rc2 + 1 + val rc4 = rc3 + 1 + val rc5 = rc4 + 1 + val rc6 = rc5 + 1 + val ts = CantonTimestamp.assertFromInstant(Instant.parse("2019-04-04T10:00:00.00Z")) + val ts2 = ts.addMicros(1) + val ts3 = ts2.plusMillis(1) + val ts4 = ts3.plusMillis(1) + val ts5 = ts4.plusMillis(1) + val ts6 = ts5.plusMillis(1) + + // Domain with index 2 + val domain1Idx = 2 + val sourceDomain1 = SourceDomainId(DomainId(UniqueIdentifier.tryCreate("domain1", "DOMAIN1"))) + val targetDomain1 = TargetDomainId(DomainId(UniqueIdentifier.tryCreate("domain1", "DOMAIN1"))) + + // Domain with index 3 + val domain2Idx = 3 + val sourceDomain2 = SourceDomainId(DomainId(UniqueIdentifier.tryCreate("domain2", "DOMAIN2"))) + val targetDomain2 = TargetDomainId(DomainId(UniqueIdentifier.tryCreate("domain2", "DOMAIN2"))) behave like prunableByTime(mkAcs) @@ -90,7 +111,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { contains exactly `expectedContract` */ def assertSnapshots(acs: ActiveContractStore, ts: CantonTimestamp, rc: RequestCounter)( - expectedContract: Option[(LfContractId, TransferCounterO)] + expectedContract: Option[(LfContractId, TransferCounter)] ): Future[Assertion] = for { snapshotTs <- acs.snapshot(ts) @@ -132,7 +153,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value 
fetch <- acs.fetchStates(Seq(coid00, coid01)) @@ -150,20 +171,16 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { } } - val rc2 = RequestCounter(1) - val ts2 = ts.addMicros(1) - "creating and archiving a contract" in { val acs = mk() - for { created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value + archived <- acs .archiveContract(coid00, TimeOfChange(rc2, ts2)) .value - snapshotTs1 <- acs.snapshot(ts2.addMicros(-1)) snapshotTs2 <- acs.snapshot(ts2) @@ -198,7 +215,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value archived <- acs.archiveContract(coid00, TimeOfChange(rc2, ts)).value fetch <- acs.fetchState(coid00) @@ -217,7 +234,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value archived <- acs.archiveContract(coid00, TimeOfChange(rc, ts2)).value fetch <- acs.fetchState(coid00) @@ -235,8 +252,8 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val acs = mk() val toc = TimeOfChange(rc, ts) for { - created1 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value - created2 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value + created1 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value + created2 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value fetch <- acs.fetchState(coid00) } yield { created1 shouldBe Symbol("successful") @@ -252,11 +269,11 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val acs = mk() val toc = TimeOfChange(rc, ts) for { - created1 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value + created1 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value archived <- acs .archiveContract(coid00, TimeOfChange(rc2, ts2)) .value - created2 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value + created2 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value fetch <- acs.fetchState(coid00) snapshot <- acs.snapshot(ts2) } yield { @@ -273,9 +290,9 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val acs = mk() val toc = TimeOfChange(rc, ts) for { - created1 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value + created1 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value archived <- acs.archiveContract(coid00, toc).value - created2 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value + created2 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value fetch <- acs.fetchState(coid00) snapshot <- acs.snapshot(ts2.addMicros(-1)) } yield { @@ -289,27 +306,12 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { } } - "marking contract as active fails if the reassignment counter value is not conform with the protocol version" in { - val acs = mk() - val toc = TimeOfChange(rc, ts) - - val faultyTransferCounter = None - - for { - marked <- acs.markContractActive(coid00 -> faultyTransferCounter, toc).value - markedSuccessfully <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value - } yield { - 
assert(marked.isAbort, "marking the contract as active fails") - assert(markedSuccessfully.successful, "marking the contract as active succeeds") - } - } - "archival must not be timestamped before creation" in { val acs = mk() val toc = TimeOfChange(rc, ts) val toc2 = TimeOfChange(rc, ts2) for { - created <- acs.markContractActive(coid00 -> initialTransferCounter, toc2).value + created <- acs.markContractCreated(coid00 -> initialTransferCounter, toc2).value archived <- acs.archiveContract(coid00, toc).value fetch <- acs.fetchState(coid00) snapshot <- acs.snapshot(ts2) @@ -338,7 +340,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { .value fetch1 <- acs.fetchState(coid00) created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value fetch2 <- acs.fetchState(coid00) snapshot1 <- acs.snapshot(ts2.addMicros(-1)) @@ -366,7 +368,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc2 = TimeOfChange(rc2, ts2) for { created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value archived1 <- acs.archiveContract(coid00, toc2).value archived2 <- acs.archiveContract(coid00, toc2).value @@ -392,7 +394,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc3 = TimeOfChange(rc2 + 1, ts2.plusMillis(1)) for { created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value archived1 <- acs.archiveContract(coid00, toc2).value archived2 <- acs.archiveContract(coid00, toc).value @@ -410,7 +412,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { archived3.isResult && archived3.nonaborts == Chain( DoubleContractArchival(coid00, toc, toc3) ), - "third archival reports error with updated timestamp", + "third archival reports error", ) assert( fetch.contains(ContractState(Archived, toc3.rc, toc3.timestamp)), @@ -419,18 +421,15 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { } } - val rc3 = RequestCounter(2L) - val ts3 = ts2.plusMillis(1) - "several contracts can be inserted" in { val acs = mk() val toc = TimeOfChange(rc, ts) val toc3 = TimeOfChange(rc2, ts3) val toc2 = TimeOfChange(rc2, ts2) for { - created2 <- acs.markContractActive(coid01 -> initialTransferCounter, toc3).value - created1 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value - created3 <- acs.markContractActive(coid10 -> initialTransferCounter, toc2).value + created2 <- acs.markContractCreated(coid01 -> initialTransferCounter, toc3).value + created1 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value + created3 <- acs.markContractCreated(coid10 -> initialTransferCounter, toc2).value archived3 <- acs.archiveContract(coid10, toc3).value fetch <- acs.fetchStates(Seq(coid00, coid01, coid10)) snapshot1 <- acs.snapshot(ts) @@ -464,11 +463,11 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val tocTs = TimeOfChange(rc, ts.plusMillis(1)) val tocRc = TimeOfChange(rc + 1, ts) for { - created1 <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value - created2 <- acs.markContractActive(coid00 -> initialTransferCounter, tocTs).value + created1 <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value + created2 <- acs.markContractCreated(coid00 -> 
initialTransferCounter, tocTs).value fetch2 <- acs.fetchState(coid00) snapshot <- acs.snapshot(ts.plusMillis(2)) - created3 <- acs.markContractActive(coid00 -> initialTransferCounter, tocRc).value + created3 <- acs.markContractCreated(coid00 -> initialTransferCounter, tocRc).value fetch3 <- acs.fetchState(coid00) } yield { assert(created1.successful, "succeed") @@ -479,7 +478,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { withClue("fail if request counter differs") { created3 shouldBe Symbol("result") - created3.nonaborts.toList.toSet shouldBe Set(DoubleContractCreation(coid00, tocTs, tocRc)) + created3.nonaborts.toList.toSet shouldBe Set(DoubleContractCreation(coid00, toc, tocRc)) } assert( @@ -508,7 +507,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { archived2 <- acs.archiveContract(coid00, tocTs).value archived3 <- acs.archiveContract(coid00, tocRc).value created <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value fetch <- acs.fetchState(coid00) snapshot1 <- acs.snapshot(ts3.addMicros(-2)) @@ -549,7 +548,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc = TimeOfChange(rc, ts) for { created <- acs - .markContractsActive( + .markContractsCreated( Seq( coid00 -> initialTransferCounter, coid01 -> initialTransferCounter, @@ -579,7 +578,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val acs = mk() for { created <- acs - .markContractsActive(Seq.empty[(LfContractId, TransferCounterO)], TimeOfChange(rc, ts)) + .markContractsCreated(Seq.empty[(LfContractId, TransferCounter)], TimeOfChange(rc, ts)) .value } yield assert(created.successful, "succeed") } @@ -590,13 +589,13 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc2 = TimeOfChange(rc2, ts2) for { created1 <- acs - .markContractsActive( + .markContractsCreated( Seq(coid00 -> initialTransferCounter, coid01 -> initialTransferCounter), toc1, ) .value created2 <- acs - .markContractsActive( + .markContractsCreated( Seq( coid00 -> initialTransferCounter, coid01 -> initialTransferCounter, @@ -634,7 +633,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc2 = TimeOfChange(rc2, ts2) for { created <- acs - .markContractsActive( + .markContractsCreated( Seq( coid00 -> initialTransferCounter, coid01 -> initialTransferCounter, @@ -675,17 +674,11 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { } yield { archived1 shouldBe Symbol("successful") archived2 shouldBe Symbol("isResult") - archived2.nonaborts should (equal( - Chain( - DoubleContractArchival(coid00, toc, toc2), - DoubleContractArchival(coid01, toc, toc2), - ) - ) or equal( - Chain( - DoubleContractArchival(coid01, toc, toc2), - DoubleContractArchival(coid00, toc, toc2), - ) - )) + archived2.nonaborts.toList.toSet shouldBe Set( + DoubleContractArchival(coid00, toc, toc2), + DoubleContractArchival(coid01, toc, toc2), + ) + fetch shouldBe Map( coid00 -> ContractState( Archived, @@ -712,11 +705,167 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { } yield assert(archived.successful, "succeed") } - val sourceDomain1 = SourceDomainId(DomainId(UniqueIdentifier.tryCreate("domain1", "DOMAIN1"))) - val targetDomain1 = TargetDomainId(DomainId(UniqueIdentifier.tryCreate("domain1", "DOMAIN1"))) + "add" should { + "be idempotent" in { + val acs = mk() + val toc0 = TimeOfChange(rc, ts) + for { + add1 <- acs + .markContractAdded(coid00 -> 
initialTransferCounter, toc0) + .value + add2 <- acs + .markContractAdded(coid00 -> initialTransferCounter, toc0) + .value + } yield { + assert(add1.successful, "create is successful") + assert(add2.successful, "create is successful") + } + } - val sourceDomain2 = SourceDomainId(DomainId(UniqueIdentifier.tryCreate("domain2", "DOMAIN2"))) - val targetDomain2 = TargetDomainId(DomainId(UniqueIdentifier.tryCreate("domain2", "DOMAIN2"))) + "be rejected on active contract" in { + val acs = mk() + val toc0 = TimeOfChange(rc, ts) + val toc1 = TimeOfChange(rc + 1, ts.plusSeconds(1)) + for { + added <- acs + .markContractsAdded(Seq(coid00 -> initialTransferCounter), toc0) + .value + created <- acs + .markContractCreated(coid01 -> initialTransferCounter, toc0) + .value + transferredIn <- acs + .transferInContract( + coid02, + toc0, + sourceDomain1, + initialTransferCounter + 1, + ) + .value + + addAdd <- acs.markContractAdded(coid00 -> initialTransferCounter, toc0).value + createAdd <- acs.markContractAdded(coid01 -> initialTransferCounter, toc1).value + tfInAdd <- acs.markContractAdded(coid02 -> initialTransferCounter, toc1).value + } yield { + assert(added.successful, "add is successful") + assert(created.successful, "create is successful") + assert(transferredIn.successful, "transfer-in is successful") + + assert(addAdd.successful, "idempotent add is successful") + assert( + createAdd.nonaborts.toList + .contains(DoubleContractCreation(coid01, toc0, toc1)), + "cannot add an active contract", + ) + assert( + tfInAdd.nonaborts.toList + .contains(DoubleContractCreation(coid02, toc0, toc1)), + "cannot add an active contract", + ) + } + } + } + + "purge" should { + "be idempotent" in { + val acs = mk() + val toc0 = TimeOfChange(rc, ts) + val toc1 = TimeOfChange(rc + 1, ts.plusSeconds(1)) + for { + create <- acs.markContractCreated(coid00 -> initialTransferCounter, toc0).value + purge1 <- acs.purgeContract(coid00, toc1).value + purge2 <- acs.purgeContract(coid00, toc1).value + } yield { + assert(create.successful, "create is successful") + assert(purge1.successful, "purge1 is successful") + assert(purge2.successful, "purge2 is successful") + } + } + } + + "purge/add" should { + "add should be allowed after a purge" in { + val acs = mk() + val toc0 = TimeOfChange(rc, ts) + val toc1 = TimeOfChange(rc + 1, ts.plusSeconds(1)) + val toc2 = TimeOfChange(rc + 2, ts.plusSeconds(2)) + for { + creates <- acs + .markContractsCreated( + Seq( + coid00 -> initialTransferCounter, + coid01 -> initialTransferCounter, + coid02 -> initialTransferCounter, + ), + toc0, + ) + .value + + archive <- acs.archiveContracts(Seq(coid00), toc1).value + purge <- acs.purgeContracts(Seq(coid01, coid02), toc1).value + + addAfterArchive <- acs + .markContractAdded(coid00 -> initialTransferCounter, toc2) + .value + + addAfterPurge <- acs + .markContractAdded(coid01 -> initialTransferCounter, toc2) + .value + + createAfterPurge <- acs + .markContractCreated(coid02 -> initialTransferCounter, toc2) + .value + } yield { + assert(creates.successful, "create is successful") + assert(archive.successful, "archive is successful") + assert(purge.successful, "create is successful") + + assert( + addAfterArchive.nonaborts.toList + .contains(ChangeAfterArchival(coid00, toc1, toc2)), + "cannot add an archived contract", + ) + assert(addAfterPurge.successful, "add after purge is successful") + assert( + createAfterPurge.nonaborts.toList + .contains(DoubleContractCreation(coid02, toc0, toc2)), + "cannot create again an added contract", + ) + } + } + + 
"add and purge can be used repeatedly" in { + val acs = mk() + val tocCreate = TimeOfChange(rc, ts) + val cyclesCount = 5L // number of purge, add + for { + creates <- acs + .markContractCreated(coid00 -> initialTransferCounter, tocCreate) + .value + + purgeAddResults <- MonadUtil.sequentialTraverse(0L until cyclesCount) { i => + val shift = 2 * i + 1 + for { + purge <- acs + .purgeContract(coid00, TimeOfChange(rc + shift, ts.plusSeconds(shift))) + .value + add <- acs + .markContractAdded( + coid00 -> (initialTransferCounter + shift), + TimeOfChange(rc + shift + 1, ts.plusSeconds(shift + 1)), + ) + .value + } yield Seq(purge.successful, add.successful) + } + + archiveToc = TimeOfChange(rc + 2 * cyclesCount + 1, ts.plusSeconds(2 * cyclesCount + 1)) + archive <- acs.archiveContract(coid00, archiveToc).value + } yield { + assert(creates.successful, "create is successful") + assert(archive.successful, "archive is successful") + assert(purgeAddResults.flatten.forall(identity), "purges + adds are successful") + } + } + } "transfer-out makes a contract inactive" in { val acs = mk() @@ -724,7 +873,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc2 = TimeOfChange(rc + 1, ts.plusSeconds(1)) for { created <- acs - .markContractsActive( + .markContractsCreated( Seq(coid00 -> initialTransferCounter, coid01 -> initialTransferCounter), toc, ) @@ -798,7 +947,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc5 = TimeOfChange(rc + 6, ts.plusSeconds(7)) val toc6 = TimeOfChange(rc + 7, ts.plusSeconds(70)) for { - create <- acs.markContractActive(coid00 -> initialTransferCounter, toc1).value + create <- acs.markContractCreated(coid00 -> initialTransferCounter, toc1).value fetch0 <- acs.fetchState(coid00) out1 <- acs.transferOutContract(coid00, toc2, targetDomain2, tc1).value fetch1 <- acs.fetchState(coid00) @@ -891,7 +1040,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { out2 <- acs.transferOutContract(coid00, toc4, targetDomain2, tc4).value in2 <- acs.transferInContract(coid00, toc5, sourceDomain2, tc5).value in1 <- acs.transferInContract(coid00, toc3, sourceDomain1, tc3).value - create <- acs.markContractActive(coid00 -> initialTransferCounter, toc1).value + create <- acs.markContractCreated(coid00 -> initialTransferCounter, toc1).value snapshot1 <- acs.snapshot(toc1.timestamp) snapshot2 <- acs.snapshot(toc2.timestamp) snapshot3 <- acs.snapshot(toc3.timestamp) @@ -998,8 +1147,8 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { SimultaneousActivation( coid00, toc1, - TransferDetails(sourceDomain1, initialTransferCounter), - TransferDetails(sourceDomain2, initialTransferCounter), + TransferIn(initialTransferCounter, domain1Idx), + TransferIn(initialTransferCounter, domain2Idx), ) ), "second transfer-in is flagged", @@ -1030,8 +1179,8 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { SimultaneousDeactivation( coid00, toc1, - TransferDetails(targetDomain1, initialTransferCounter), - TransferDetails(targetDomain2, initialTransferCounter), + TransferOut(initialTransferCounter, domain1Idx), + TransferOut(initialTransferCounter, domain2Idx), ) ), "second transfer-out is flagged", @@ -1046,6 +1195,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { ) } } + "complain about simultaneous archivals and transfer-outs" in { val acs = mk() val toc = TimeOfChange(rc, ts) @@ -1060,9 +1210,8 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { SimultaneousDeactivation( coid00, toc, - TransferDetails(targetDomain1, 
initialTransferCounter), - // The acs always stores transfer counter None for archivals - CreationArchivalDetail(None: TransferCounterO), + TransferOut(initialTransferCounter, domain1Idx), + Archive, ) ), "archival is flagged", @@ -1080,7 +1229,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val acs = mk() val toc = TimeOfChange(rc, ts) for { - create <- acs.markContractActive(coid00 -> initialTransferCounter, toc).value + create <- acs.markContractCreated(coid00 -> initialTransferCounter, toc).value in <- acs.transferInContract(coid00, toc, sourceDomain1, initialTransferCounter).value fetch <- acs.fetchState(coid00) } yield { @@ -1090,8 +1239,8 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { SimultaneousActivation( coid00, toc, - CreationArchivalDetail(initialTransferCounter), - TransferDetails(sourceDomain1, initialTransferCounter), + Create(initialTransferCounter), + TransferIn(initialTransferCounter, domain1Idx), ) ), "transfer-in is flagged", @@ -1155,7 +1304,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc3 = TimeOfChange(rc + 3, ts.plusSeconds(2)) val toc4 = TimeOfChange(rc + 2, ts.plusSeconds(3)) for { - create <- acs.markContractActive(coid00 -> initialTransferCounter, toc3).value + create <- acs.markContractCreated(coid00 -> initialTransferCounter, toc3).value in1 <- acs.transferInContract(coid00, toc1, sourceDomain1, initialTransferCounter).value fetch3 <- acs.fetchState(coid00) in4 <- acs.transferInContract(coid00, toc4, sourceDomain2, tc3).value @@ -1201,13 +1350,13 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc3 = TimeOfChange(rc3, ts3) for { _ <- acs - .markContractsActive( + .markContractsCreated( Seq(coid00 -> initialTransferCounter, coid01 -> initialTransferCounter), toc, ) .value _ <- acs - .markContractsActive( + .markContractsCreated( Seq(coid10 -> initialTransferCounter, coid11 -> initialTransferCounter), toc, ) @@ -1216,7 +1365,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { _ <- acs.transferOutContract(coid11, toc2, targetDomain1, initialTransferCounter).value _ <- acs.archiveContracts(Seq(coid01), toc3).value _ <- acs - .markContractsActive( + .markContractsCreated( Seq(coid20 -> initialTransferCounter, coid21 -> initialTransferCounter), toc3, ) @@ -1318,18 +1467,18 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc2 = TimeOfChange(rc + 2, ts2) for { - _ <- acs.markContractActive(coid00 -> initialTransferCounter, toc1).value + _ <- acs.markContractCreated(coid00 -> initialTransferCounter, toc1).value _ <- acs - .markContractsActive( + .markContractsCreated( Seq(coid10 -> initialTransferCounter, coid11 -> initialTransferCounter), toc, ) .value - _ <- acs.markContractActive(coid01 -> initialTransferCounter, toc2).value + _ <- acs.markContractCreated(coid01 -> initialTransferCounter, toc2).value snapshot <- acs.snapshot(ts2) } yield { val idOrdering = Ordering[LfContractId] - val resultOrdering = Ordering.Tuple2[LfContractId, (CantonTimestamp, TransferCounterO)] + val resultOrdering = Ordering.Tuple2[LfContractId, (CantonTimestamp, TransferCounter)] snapshot.toList shouldBe snapshot.toList.sorted(resultOrdering) snapshot.keys.toList shouldBe snapshot.keys.toList.sorted(idOrdering) } @@ -1347,7 +1496,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc32 = TimeOfChange(rc + 3, ts2) val toc4 = TimeOfChange(rc + 4, ts4) for { - _ <- valueOrFail(acs.markContractActive(coid00 -> initialTransferCounter, toc1))( + _ <- 
valueOrFail(acs.markContractCreated(coid00 -> initialTransferCounter, toc1))( s"create $coid00" ) _ <- acs.transferInContract(coid00, toc0, sourceDomain1, initialTransferCounter).value @@ -1369,7 +1518,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { s"archive $coid00 at $toc32" ) _ <- valueOrFail( - acs.markContractActive( + acs.markContractCreated( coid00 -> initialTransferCounter, TimeOfChange(rc - 1, ts.plusSeconds(-1)), ) @@ -1391,14 +1540,14 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val toc3 = TimeOfChange(rc3, ts3) for { _ <- valueOrFail( - acs.markContractsActive( + acs.markContractsCreated( Seq(coid00 -> initialTransferCounter, coid01 -> initialTransferCounter), toc1, ) )( s"create contracts at $toc1" ) - _ <- valueOrFail(acs.markContractsActive(Seq(coid10 -> initialTransferCounter), toc2))( + _ <- valueOrFail(acs.markContractsCreated(Seq(coid10 -> initialTransferCounter), toc2))( s"create contracts at $toc2" ) snapshot1 <- acs.contractSnapshot(Set(coid00, coid10), toc1.timestamp) @@ -1529,13 +1678,6 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { } } - val rc4 = rc3 + 1 - val ts4 = ts3.plusMillis(1) - val rc5 = rc4 + 1 - val ts5 = ts4.plusMillis(1) - val rc6 = rc5 + 1 - val ts6 = ts5.plusMillis(1) - "return correct changes" in { val acs = mk() val toc1 = TimeOfChange(rc, ts) @@ -1547,7 +1689,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { _ <- valueOrFail( - acs.markContractsActive( + acs.markContractsCreated( Seq(coid00 -> initialTransferCounter, coid01 -> initialTransferCounter), toc1, ) @@ -1555,7 +1697,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { s"create contracts at $toc1" ) - _ <- valueOrFail(acs.markContractsActive(Seq(coid10 -> initialTransferCounter), toc2))( + _ <- valueOrFail(acs.markContractsCreated(Seq(coid10 -> initialTransferCounter), toc2))( s"create contracts at $toc2" ) _ <- valueOrFail(acs.archiveContract(coid01, toc2))( @@ -1590,10 +1732,11 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { ActiveContractIdsChange( activations = Map( coid10 -> StateChangeType(ContractChange.Created, initialTransferCounter), - coid11 -> StateChangeType(ContractChange.Assigned, tc1), + coid11 -> StateChangeType(ContractChange.TransferredIn, tc1), + ), + deactivations = Map( + coid01 -> StateChangeType(ContractChange.Archived, initialTransferCounter) ), - deactivations = - Map(coid01 -> StateChangeType(ContractChange.Archived, initialTransferCounter)), ), ), ( @@ -1602,14 +1745,14 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { activations = Map.empty, deactivations = Map( coid10 -> StateChangeType(ContractChange.Archived, initialTransferCounter), - coid11 -> StateChangeType(ContractChange.Unassigned, tc2), + coid11 -> StateChangeType(ContractChange.TransferredOut, tc2), ), ), ), ( toc4, ActiveContractIdsChange( - activations = Map(coid11 -> StateChangeType(ContractChange.Assigned, tc3)), + activations = Map(coid11 -> StateChangeType(ContractChange.TransferredIn, tc3)), deactivations = Map.empty, ), ), @@ -1629,7 +1772,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { // Archived contract after creation has initialTransferCounter, or None in older proto versions created1 <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value archived1 <- acs.archiveContract(coid00, TimeOfChange(rc2, ts2)).value transferCounterSnapshot1 <- 
acs.bulkContractsTransferCounterSnapshot(Set(coid00), rc2) @@ -1638,7 +1781,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { assertion1 <- assertSnapshots(acs, ts, rc)(Some((coid00, initialTransferCounter))) // Archived contract after several transfer-ins has the last transfer-in counter, or None in older proto versions created2 <- acs - .markContractActive(coid01 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid01 -> initialTransferCounter, TimeOfChange(rc, ts)) .value transferOut2 <- acs .transferOutContract(coid01, TimeOfChange(rc2, ts2), targetDomain1, tc2) @@ -1666,7 +1809,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { val acs = mk() for { created1 <- acs - .markContractActive(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) + .markContractCreated(coid00 -> initialTransferCounter, TimeOfChange(rc, ts)) .value transferIn1 <- acs .transferInContract(coid01, TimeOfChange(rc2, ts2), sourceDomain2, tc2) @@ -1724,7 +1867,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { def activateMaybeDeactivate( activate: ActiveContractStore => CheckedT[Future, AcsError, AcsWarning, Unit] = { acs => - acs.markContractActive(coid00 -> initialTransferCounter, toc1) + acs.markContractCreated(coid00 -> initialTransferCounter, toc1) }, deactivate: Option[ActiveContractStore => CheckedT[Future, AcsError, AcsWarning, Unit]] = None, @@ -1801,7 +1944,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { _ <- addContractsToStore(contractStore, contracts) - activate = acs.markContractsActive( + activate = acs.markContractsCreated( contracts.map(_._1).map(cid => cid -> initialTransferCounter), toc1, ) @@ -1827,7 +1970,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { _ <- addContractsToStore(contractStore, List(coid00 -> packageId)) - _ <- valueOrFail(acs.markContractActive(coid00 -> initialTransferCounter, toc1))( + _ <- valueOrFail(acs.markContractCreated(coid00 -> initialTransferCounter, toc1))( s"create contract at $toc1" ) @@ -1860,7 +2003,7 @@ trait ActiveContractStoreTest extends PrunableByTimeTest { for { _ <- addContractsToStore(contractStore, contracts) - activate = acs.markContractsActive( + activate = acs.markContractsCreated( contracts.map(_._1).map(cid => cid -> initialTransferCounter), toc1, ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala index 285b3aee0..46aaa620e 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/HookedAcs.scala @@ -20,9 +20,10 @@ import com.digitalasset.canton.protocol.{ TransferDomainId, } import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} +import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.CheckedT -import com.digitalasset.canton.{RequestCounter, TransferCounterO} +import com.digitalasset.canton.{RequestCounter, TransferCounter} import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.SortedMap @@ -31,20 +32,21 @@ import scala.concurrent.{ExecutionContext, Future} private[participant] class HookedAcs(private val acs: ActiveContractStore)(implicit val ec: ExecutionContext ) extends ActiveContractStore { - import 
HookedAcs.{noArchiveAction, noCreateAction, noTransferAction} + import HookedAcs.{noArchivePurgeAction, noCreateAddAction, noTransferAction} - private val nextCreateHook - : AtomicReference[(Seq[(LfContractId, TransferCounterO)], TimeOfChange) => Future[Unit]] = - new AtomicReference[(Seq[(LfContractId, TransferCounterO)], TimeOfChange) => Future[Unit]]( - noCreateAction + private val nextCreateAddHook + : AtomicReference[(Seq[(LfContractId, TransferCounter)], TimeOfChange) => Future[Unit]] = + new AtomicReference[(Seq[(LfContractId, TransferCounter)], TimeOfChange) => Future[Unit]]( + noCreateAddAction ) - private val nextArchiveHook: AtomicReference[(Seq[LfContractId], TimeOfChange) => Future[Unit]] = - new AtomicReference[(Seq[LfContractId], TimeOfChange) => Future[Unit]](noArchiveAction) + private val nextArchivePurgeHook + : AtomicReference[(Seq[LfContractId], TimeOfChange) => Future[Unit]] = + new AtomicReference[(Seq[LfContractId], TimeOfChange) => Future[Unit]](noArchivePurgeAction) private val nextTransferHook = new AtomicReference[ ( - Seq[(LfContractId, TransferDomainId, TransferCounterO, TimeOfChange)], - Boolean, + Seq[(LfContractId, TransferDomainId, TransferCounter, TimeOfChange)], + Boolean, // true for transfer-out, false for transfer-in ) => Future[Unit] ]( noTransferAction @@ -52,15 +54,17 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli private val nextFetchHook: AtomicReference[Iterable[LfContractId] => Future[Unit]] = new AtomicReference[Iterable[LfContractId] => Future[Unit]](noFetchAction) - def setCreateHook( - preCreate: (Seq[(LfContractId, TransferCounterO)], TimeOfChange) => Future[Unit] + override private[store] def indexedStringStore: IndexedStringStore = acs.indexedStringStore + + def setCreateAddHook( + preCreate: (Seq[(LfContractId, TransferCounter)], TimeOfChange) => Future[Unit] ): Unit = - nextCreateHook.set(preCreate) - def setArchiveHook(preArchive: (Seq[LfContractId], TimeOfChange) => Future[Unit]): Unit = - nextArchiveHook.set(preArchive) + nextCreateAddHook.set(preCreate) + def setArchivePurgeHook(preArchive: (Seq[LfContractId], TimeOfChange) => Future[Unit]): Unit = + nextArchivePurgeHook.set(preArchive) def setTransferHook( preTransfer: ( - Seq[(LfContractId, TransferDomainId, TransferCounterO, TimeOfChange)], + Seq[(LfContractId, TransferDomainId, TransferCounter, TimeOfChange)], Boolean, ) => Future[Unit] ): Unit = @@ -68,25 +72,27 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli def setFetchHook(preFetch: Iterable[LfContractId] => Future[Unit]): Unit = nextFetchHook.set(preFetch) - override def markContractsActive( - contracts: Seq[(LfContractId, TransferCounterO)], + override def markContractsCreatedOrAdded( + contracts: Seq[(LfContractId, TransferCounter)], toc: TimeOfChange, + isCreation: Boolean, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT { - val preCreate = nextCreateHook.getAndSet(noCreateAction) + val preCreate = nextCreateAddHook.getAndSet(noCreateAddAction) preCreate(contracts, toc).flatMap { _ => - acs.markContractsActive(contracts, toc).value + acs.markContractsCreated(contracts, toc).value } } - override def archiveContracts( + override def purgeOrArchiveContracts( contracts: Seq[LfContractId], toc: TimeOfChange, + isArchival: Boolean, )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT { - val preArchive = nextArchiveHook.getAndSet(noArchiveAction) + val preArchive 
= nextArchivePurgeHook.getAndSet(noArchivePurgeAction) preArchive(contracts, toc) .flatMap { _ => acs.archiveContracts(contracts, toc).value @@ -94,7 +100,7 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli } override def transferInContracts( - transferIns: Seq[(LfContractId, SourceDomainId, TransferCounterO, TimeOfChange)] + transferIns: Seq[(LfContractId, SourceDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT { @@ -108,12 +114,15 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli } override def transferOutContracts( - transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounterO, TimeOfChange)] + transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT { val preTransfer = nextTransferHook.getAndSet(noTransferAction) - preTransfer(transferOuts, true).flatMap { _ => + preTransfer( + transferOuts, + true, + ).flatMap { _ => acs.transferOutContracts(transferOuts).value } } @@ -132,12 +141,12 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli override def snapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]] = + ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]] = acs.snapshot(timestamp) override def snapshot(rc: RequestCounter)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounterO)]] = + ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounter)]] = acs.snapshot(rc) override def contractSnapshot(contractIds: Set[LfContractId], timestamp: CantonTimestamp)(implicit @@ -150,7 +159,7 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli requestCounter: RequestCounter, )(implicit traceContext: TraceContext - ): Future[Map[LfContractId, TransferCounterO]] = + ): Future[Map[LfContractId, TransferCounter]] = acs.bulkContractsTransferCounterSnapshot(contractIds, requestCounter) override def doPrune(beforeAndIncluding: CantonTimestamp, lastPruning: Option[CantonTimestamp])( @@ -190,17 +199,17 @@ private[participant] class HookedAcs(private val acs: ActiveContractStore)(impli } object HookedAcs { - private val noCreateAction - : (Seq[(LfContractId, TransferCounterO)], TimeOfChange) => Future[Unit] = { (_, _) => + private val noCreateAddAction + : (Seq[(LfContractId, TransferCounter)], TimeOfChange) => Future[Unit] = { (_, _) => Future.unit } - private val noArchiveAction: (Seq[LfContractId], TimeOfChange) => Future[Unit] = { (_, _) => + private val noArchivePurgeAction: (Seq[LfContractId], TimeOfChange) => Future[Unit] = { (_, _) => Future.unit } private val noTransferAction: ( - Seq[(LfContractId, TransferDomainId, TransferCounterO, TimeOfChange)], + Seq[(LfContractId, TransferDomainId, TransferCounter, TimeOfChange)], Boolean, ) => Future[Unit] = { (_, _) => Future.unit diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowingAcs.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowingAcs.scala index 9e8c2082e..9d7b0f9aa 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowingAcs.scala +++ 
b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/ThrowingAcs.scala @@ -14,9 +14,10 @@ import com.digitalasset.canton.participant.store.ActiveContractStore.{ import com.digitalasset.canton.participant.util.{StateChange, TimeOfChange} import com.digitalasset.canton.protocol.{LfContractId, SourceDomainId, TargetDomainId} import com.digitalasset.canton.pruning.{PruningPhase, PruningStatus} +import com.digitalasset.canton.store.IndexedStringStore import com.digitalasset.canton.tracing.TraceContext import com.digitalasset.canton.util.{Checked, CheckedT} -import com.digitalasset.canton.{RequestCounter, TransferCounterO} +import com.digitalasset.canton.{RequestCounter, TransferCounter} import scala.collection.immutable.SortedMap import scala.concurrent.{ExecutionContext, Future} @@ -25,31 +26,41 @@ class ThrowingAcs[T <: Throwable](mk: String => T)(override implicit val ec: Exe extends ActiveContractStore { private[this] type M = Checked[AcsError, AcsWarning, Unit] - override def markContractsActive( - contracts: Seq[(LfContractId, TransferCounterO)], + override private[store] def indexedStringStore: IndexedStringStore = throw new RuntimeException( + "I should not be called" + ) + + override def markContractsCreatedOrAdded( + contracts: Seq[(LfContractId, TransferCounter)], toc: TimeOfChange, + isCreation: Boolean, )(implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = - CheckedT(Future.failed[M](mk(s"createContracts for $contracts at $toc"))) + ): CheckedT[Future, AcsError, AcsWarning, Unit] = { + val operation = if (isCreation) "create contracts" else "add contracts" + CheckedT(Future.failed[M](mk(s"$operation for $contracts at $toc"))) + } - override def archiveContracts( + override def purgeOrArchiveContracts( contracts: Seq[LfContractId], toc: TimeOfChange, + isArchival: Boolean, )(implicit traceContext: TraceContext - ): CheckedT[Future, AcsError, AcsWarning, Unit] = - CheckedT(Future.failed[M](mk(s"archiveContracts for $contracts at $toc"))) + ): CheckedT[Future, AcsError, AcsWarning, Unit] = { + val operation = if (isArchival) "archive contracts" else "purge contracts" + CheckedT(Future.failed[M](mk(s"$operation for $contracts at $toc"))) + } override def transferInContracts( - transferIns: Seq[(LfContractId, SourceDomainId, TransferCounterO, TimeOfChange)] + transferIns: Seq[(LfContractId, SourceDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = CheckedT(Future.failed[M](mk(s"transferInContracts for $transferIns"))) override def transferOutContracts( - transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounterO, TimeOfChange)] + transferOuts: Seq[(LfContractId, TargetDomainId, TransferCounter, TimeOfChange)] )(implicit traceContext: TraceContext ): CheckedT[Future, AcsError, AcsWarning, Unit] = @@ -68,12 +79,12 @@ class ThrowingAcs[T <: Throwable](mk: String => T)(override implicit val ec: Exe override def snapshot(timestamp: CantonTimestamp)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounterO)]] = + ): Future[SortedMap[LfContractId, (CantonTimestamp, TransferCounter)]] = Future.failed(mk(s"snapshot at $timestamp")) override def snapshot(rc: RequestCounter)(implicit traceContext: TraceContext - ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounterO)]] = + ): Future[SortedMap[LfContractId, (RequestCounter, TransferCounter)]] = Future.failed(mk(s"snapshot at $rc")) override 
def contractSnapshot(contractIds: Set[LfContractId], timestamp: CantonTimestamp)(implicit @@ -88,8 +99,8 @@ class ThrowingAcs[T <: Throwable](mk: String => T)(override implicit val ec: Exe requestCounter: RequestCounter, )(implicit traceContext: TraceContext - ): Future[Map[LfContractId, TransferCounterO]] = - Future.failed[Map[LfContractId, TransferCounterO]]( + ): Future[Map[LfContractId, TransferCounter]] = + Future.failed[Map[LfContractId, TransferCounter]]( mk( s"bulkContractsTransferCounterSnapshot for $contractIds up to but not including $requestCounter" ) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala index 6340cd4f8..22c63306d 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/TransferStoreTest.scala @@ -54,7 +54,6 @@ import com.digitalasset.canton.{ RequestCounter, SequencerCounter, TransferCounter, - TransferCounterO, config, } import org.scalatest.wordspec.AsyncWordSpec @@ -1336,7 +1335,7 @@ object TransferStoreTest extends EitherValues with NoTracing { ) } - private val initialTransferCounter: TransferCounterO = Some(TransferCounter.Genesis) + private val initialTransferCounter: TransferCounter = TransferCounter.Genesis val seedGenerator = new SeedGenerator(pureCryptoApi) diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala index aedb79ff6..75f63f1b7 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/db/DbActiveContractStoreTest.scala @@ -20,7 +20,7 @@ import scala.concurrent.Future trait DbActiveContractStoreTest extends AsyncWordSpec with BaseTest with ActiveContractStoreTest { this: DbTest => - val domainIndex = 1 + private val domainIndex = 1 override def cleanDb(storage: DbStorage): Future[Unit] = { import storage.api.* @@ -39,12 +39,10 @@ trait DbActiveContractStoreTest extends AsyncWordSpec with BaseTest with ActiveC ec => { val indexStore = new InMemoryIndexedStringStore(minIndex = 1, maxIndex = maxDomainIndex) - val domainId = { - IndexedDomain.tryCreate( - acsDomainId, - indexStore.getOrCreateIndexForTesting(IndexedStringType.domainId, acsDomainStr), - ) - } + val domainId = IndexedDomain.tryCreate( + acsDomainId, + indexStore.getOrCreateIndexForTesting(IndexedStringType.domainId, acsDomainStr), + ) // Check we end up with the expected domain index. If we don't, then test isolation may get broken. 
assert(domainId.index == domainIndex) new DbActiveContractStore( diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala index 498ab9e60..5a5708af8 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/store/memory/ActiveContractStoreTestInMemory.scala @@ -6,6 +6,7 @@ package com.digitalasset.canton.participant.store.memory import com.digitalasset.canton.BaseTest import com.digitalasset.canton.lifecycle.CloseContext import com.digitalasset.canton.participant.store.* +import com.digitalasset.canton.store.memory.InMemoryIndexedStringStore import com.digitalasset.canton.version.HasTestCloseContext import org.scalatest.wordspec.AsyncWordSpec @@ -17,9 +18,18 @@ class ActiveContractStoreTestInMemory override protected implicit lazy val closeContext: CloseContext = HasTestCloseContext.makeTestCloseContext(logger) + /* + We need 3 domains. + In `DbActiveContractStoreTest`, we have an `acsDomainId` which takes index 1, so we start at 2. + */ + private lazy val indexedStringStore = new InMemoryIndexedStringStore(minIndex = 2, maxIndex = 4) + "InMemoryActiveContractStore" should { behave like activeContractStore( - ec => new InMemoryActiveContractStore(testedProtocolVersion, loggerFactory)(ec), + ec => + new InMemoryActiveContractStore(indexedStringStore, testedProtocolVersion, loggerFactory)( + ec + ), ec => new InMemoryContractStore(loggerFactory)(ec), ) } diff --git a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala index 634338d87..628da24db 100644 --- a/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala +++ b/community/participant/src/test/scala/com/digitalasset/canton/participant/topology/LedgerServerPartyNotifierTest.scala @@ -56,6 +56,8 @@ final class LedgerServerPartyNotifierTest extends AsyncWordSpec with BaseTest { private var counter = SequencerCounter(0) + // TODO(#17726) Figure out what this synchronization block is actually guarding.
+ @SuppressWarnings(Array("com.digitalasset.canton.SynchronizedFuture")) def simulateTransaction(mapping: PartyToParticipantX): Future[Unit] = blocking { clock.synchronized { diff --git a/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala b/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala index 5f59516b4..de1800e10 100644 --- a/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala +++ b/community/testing/src/main/scala/com/digitalasset/canton/BaseTest.scala @@ -407,7 +407,6 @@ object BaseTest { requiredHashAlgorithms = SymbolicCryptoProvider.supportedHashAlgorithms, requiredCryptoKeyFormats = SymbolicCryptoProvider.supportedCryptoKeyFormats, protocolVersion = protocolVersion, - acsCommitmentsCatchUp = acsCommitmentsCatchUp, ) lazy val testedProtocolVersion: ProtocolVersion = diff --git a/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicPrivateCrypto.scala b/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicPrivateCrypto.scala index 52e26cf82..9c566fc54 100644 --- a/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicPrivateCrypto.scala +++ b/community/testing/src/main/scala/com/digitalasset/canton/crypto/provider/symbolic/SymbolicPrivateCrypto.scala @@ -54,4 +54,6 @@ class SymbolicPrivateCrypto( EncryptionKeyPair.create(id, CryptoKeyFormat.Symbolic, pubKey, privKey, scheme) ) ) + + override def close(): Unit = () } diff --git a/community/testing/src/main/scala/com/digitalasset/canton/ledger/api/MockMessages.scala b/community/testing/src/main/scala/com/digitalasset/canton/ledger/api/MockMessages.scala index eddb55a9b..02a1cb47b 100644 --- a/community/testing/src/main/scala/com/digitalasset/canton/ledger/api/MockMessages.scala +++ b/community/testing/src/main/scala/com/digitalasset/canton/ledger/api/MockMessages.scala @@ -22,7 +22,6 @@ object MockMessages { val participantBegin: ParticipantOffset = ParticipantOffset( ParticipantOffset.Value.Boundary(PARTICIPANT_BOUNDARY_BEGIN) ) - val ledgerId = "ledgerId" val workflowId = "workflowId" val applicationId = "applicationId" val commandId = "commandId" diff --git a/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala b/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala index 8de0caaa1..6b8076936 100644 --- a/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala +++ b/community/testing/src/main/scala/com/digitalasset/canton/store/db/DbStorageSetup.scala @@ -247,7 +247,7 @@ class PostgresTestContainerSetup( postgresContainer.getUsername, postgresContainer.getPassword, postgresContainer.getDatabaseName, - postgresContainer.getContainerIpAddress, + postgresContainer.getHost, postgresContainer.getFirstMappedPort, ) diff --git a/community/util-logging/src/main/scala/com/digitalasset/canton/metrics/OnDemandMetricsReader.scala b/community/util-logging/src/main/scala/com/digitalasset/canton/metrics/OnDemandMetricsReader.scala index afdbf26b2..8b85ba18f 100644 --- a/community/util-logging/src/main/scala/com/digitalasset/canton/metrics/OnDemandMetricsReader.scala +++ b/community/util-logging/src/main/scala/com/digitalasset/canton/metrics/OnDemandMetricsReader.scala @@ -4,8 +4,9 @@ package com.digitalasset.canton.metrics import io.opentelemetry.sdk.common.CompletableResultCode +import io.opentelemetry.sdk.metrics.InstrumentType import 
io.opentelemetry.sdk.metrics.data.{AggregationTemporality, MetricData}
-import io.opentelemetry.sdk.metrics.`export`.{MetricProducer, MetricReader, MetricReaderFactory}
+import io.opentelemetry.sdk.metrics.export.{CollectionRegistration, MetricReader}
 import org.slf4j.LoggerFactory
 
 import java.util.concurrent.atomic.AtomicReference
 
@@ -23,23 +24,19 @@ object OnDemandMetricsReader {
 
 }
 
-class OpenTelemetryOnDemandMetricsReader
-    extends MetricReaderFactory
-    with MetricReader
-    with OnDemandMetricsReader {
+class OpenTelemetryOnDemandMetricsReader extends MetricReader with OnDemandMetricsReader {
 
   private val logger = LoggerFactory.getLogger(getClass)
 
-  private val optionalProducer = new AtomicReference[Option[MetricProducer]](None)
+  private val optionalProducer = new AtomicReference[Option[CollectionRegistration]](None)
 
-  override def apply(producer: MetricProducer): MetricReader = {
-    optionalProducer.set(Some(producer))
-    this
-  }
+  override def register(registration: CollectionRegistration): Unit =
+    optionalProducer.set(Some(registration))
 
-  override def getPreferredTemporality: AggregationTemporality = AggregationTemporality.CUMULATIVE
+  override def forceFlush(): CompletableResultCode = CompletableResultCode.ofSuccess()
 
-  override def flush(): CompletableResultCode = CompletableResultCode.ofSuccess()
+  override def getAggregationTemporality(instrumentType: InstrumentType): AggregationTemporality =
+    AggregationTemporality.CUMULATIVE
 
   override def shutdown(): CompletableResultCode = {
     optionalProducer.set(None)
diff --git a/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala b/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala
index c788cb115..c6a8e83a8 100644
--- a/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala
+++ b/community/util-logging/src/main/scala/com/digitalasset/canton/telemetry/OpenTelemetryFactory.scala
@@ -4,18 +4,25 @@ package com.digitalasset.canton.telemetry
 import com.daml.metrics.HistogramDefinition
-import com.daml.telemetry.OpenTelemetryOwner.addViewsToProvider
+import com.daml.metrics.api.MetricHandle.Histogram
+import com.daml.metrics.api.opentelemetry.OpenTelemetryTimer
 import com.digitalasset.canton.logging.{NamedLoggerFactory, TracedLogger}
 import com.digitalasset.canton.metrics.OnDemandMetricsReader.NoOpOnDemandMetricsReader$
 import com.digitalasset.canton.metrics.OpenTelemetryOnDemandMetricsReader
 import com.digitalasset.canton.tracing.{NoopSpanExporter, TraceContext, TracingConfig}
 import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator
 import io.opentelemetry.context.propagation.ContextPropagators
-import io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter
 import io.opentelemetry.exporter.otlp.trace.OtlpGrpcSpanExporter
 import io.opentelemetry.exporter.zipkin.ZipkinSpanExporter
 import io.opentelemetry.sdk.OpenTelemetrySdk
-import io.opentelemetry.sdk.metrics.{SdkMeterProvider, SdkMeterProviderBuilder}
+import io.opentelemetry.sdk.metrics.{
+  Aggregation,
+  InstrumentSelector,
+  InstrumentType,
+  SdkMeterProvider,
+  SdkMeterProviderBuilder,
+  View,
+}
 import io.opentelemetry.sdk.trace.`export`.{
   BatchSpanProcessor,
   BatchSpanProcessorBuilder,
@@ -24,8 +31,8 @@ import io.opentelemetry.sdk.trace.`export`.{
 import io.opentelemetry.sdk.trace.samplers.Sampler
 import io.opentelemetry.sdk.trace.{SdkTracerProvider, SdkTracerProviderBuilder}
 
-import java.util.concurrent.TimeUnit
 import scala.concurrent.duration.FiniteDuration
+import scala.jdk.CollectionConverters.SeqHasAsJava
 import scala.jdk.DurationConverters.ScalaDurationOps
 import scala.util.chaining.scalaUtilChainingOps
 
@@ -97,11 +104,6 @@ object OpenTelemetryFactory {
   }
 
   private def createExporter(config: TracingConfig.Exporter): SpanExporter = config match {
-    case TracingConfig.Exporter.Jaeger(address, port) =>
-      JaegerGrpcSpanExporter.builder
-        .setEndpoint(s"http://$address:$port")
-        .setTimeout(30, TimeUnit.SECONDS)
-        .build
     case TracingConfig.Exporter.Zipkin(address, port) =>
       val httpUrl = s"http://$address:$port/api/v2/spans"
       ZipkinSpanExporter.builder.setEndpoint(httpUrl).build
@@ -124,4 +126,67 @@ object OpenTelemetryFactory {
     if (config.parentBased) Sampler.parentBased(sampler) else sampler
   }
 
+  def addViewsToProvider(
+      builder: SdkMeterProviderBuilder,
+      histograms: Seq[HistogramDefinition],
+  ): SdkMeterProviderBuilder = {
+    // Only one view is going to be applied, and it is applied in the order of its definition;
+    // therefore the config views must be registered first so that they can override the code-defined views.
+    // TODO(#17917) Note: the above comment does not match what is written here https://opentelemetry.io/docs/specs/otel/metrics/sdk/#measurement-processing
+    //  what happens is that we get two views that don't set their name and therefore conflict
+    //  Therefore, fix this upstream and remove the methods here again
+    val builderWithCustomViews = histograms.foldRight(builder) { case (histogram, builder) =>
+      builder.registerView(
+        histogramSelectorByName(histogram.name),
+        explicitHistogramBucketsView(histogram.bucketBoundaries),
+      )
+    }
+    builderWithCustomViews
+      // generic timing buckets
+      .registerView(
+        histogramSelectorByName(s"*${OpenTelemetryTimer.TimerUnitAndSuffix}"),
+        explicitHistogramBucketsView(
+          Seq(
+            0.01d, 0.025d, 0.050d, 0.075d, 0.1d, 0.15d, 0.2d, 0.25d, 0.35d, 0.5d, 0.75d, 1d, 2.5d,
+            5d, 10d,
+          )
+        ),
+      )
+      // use size-specific buckets
+      .registerView(
+        histogramSelectorByName(s"*${Histogram.Bytes}"),
+        explicitHistogramBucketsView(
+          Seq(
+            kilobytes(10),
+            kilobytes(50),
+            kilobytes(100),
+            kilobytes(500),
+            megabytes(1),
+            megabytes(5),
+            megabytes(10),
+            megabytes(50),
+          )
+        ),
+      )
+  }
+
+  private def histogramSelectorByName(stringWithWildcards: String) = InstrumentSelector
+    .builder()
+    .setType(InstrumentType.HISTOGRAM)
+    .setName(stringWithWildcards)
+    .build()
+
+  private def explicitHistogramBucketsView(buckets: Seq[Double]) = View
+    .builder()
+    .setAggregation(
+      Aggregation.explicitBucketHistogram(
+        buckets.map(Double.box).asJava
+      )
+    )
+    .build()
+
+  private def kilobytes(value: Int): Double = value * 1024d
+
+  private def megabytes(value: Int): Double = value * 1024d * 1024d
+
 }
diff --git a/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala b/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala
index 57972789f..245a2a04d 100644
--- a/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala
+++ b/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracerProvider.scala
@@ -16,7 +16,6 @@ import io.opentelemetry.sdk.resources.Resource
 import io.opentelemetry.sdk.trace.SdkTracerProvider
 import io.opentelemetry.sdk.trace.data.SpanData
 import io.opentelemetry.sdk.trace.export.{SimpleSpanProcessor, SpanExporter}
-import io.opentelemetry.semconv.resource.attributes.ResourceAttributes
 
 import java.util
 
@@ -58,7 +57,7 @@ private[tracing] class TracerProviderWithBuilder(
       .foldRight(Attributes.builder()) { case ((key, value), builder) =>
         builder.put(s"canton.$key", value)
       }
-      .put(ResourceAttributes.SERVICE_NAME, name)
+      .put("service.name", name)
       .build()
     val serviceNameResource = Resource.create(attrs)
     configuredOpenTelemetry.tracerProviderBuilder
@@ -70,7 +69,7 @@ private[tracing] class TracerProviderWithBuilder(
     OpenTelemetrySdk.builder
       .setPropagators(configuredOpenTelemetry.openTelemetry.getPropagators)
      .setMeterProvider(configuredOpenTelemetry.openTelemetry.getSdkMeterProvider)
-      .setLogEmitterProvider(configuredOpenTelemetry.openTelemetry.getSdkLogEmitterProvider)
+      .setLoggerProvider(configuredOpenTelemetry.openTelemetry.getSdkLoggerProvider)
       .setTracerProvider(tracerProvider)
       .build
diff --git a/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracingConfig.scala b/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracingConfig.scala
index 34375356c..b4c6b1883 100644
--- a/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracingConfig.scala
+++ b/community/util-logging/src/main/scala/com/digitalasset/canton/tracing/TracingConfig.scala
@@ -48,7 +48,6 @@ object TracingConfig {
   sealed trait Exporter
   object Exporter {
     case object Disabled extends Exporter
-    final case class Jaeger(address: String = "localhost", port: Int = 14250) extends Exporter
     final case class Zipkin(address: String = "localhost", port: Int = 9411) extends Exporter
     final case class Otlp(address: String = "localhost", port: Int = 4318) extends Exporter
   }
diff --git a/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorResource.scala b/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorResource.scala
index 01cb76186..2a3588d74 100644
--- a/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorResource.scala
+++ b/daml-common-staging/daml-errors/src/main/scala/com/daml/error/ErrorResource.scala
@@ -20,7 +20,6 @@ object ErrorResource {
   lazy val DalfPackage: ErrorResource = ErrorResource("PACKAGE")
   lazy val TemplateId: ErrorResource = ErrorResource("TEMPLATE_ID")
   lazy val InterfaceId: ErrorResource = ErrorResource("INTERFACE_ID")
-  lazy val LedgerId: ErrorResource = ErrorResource("LEDGER_ID")
   lazy val PackageName: ErrorResource = ErrorResource("PACKAGE_NAME")
   lazy val CommandId: ErrorResource = ErrorResource("COMMAND_ID")
   lazy val Party: ErrorResource = ErrorResource("PARTY")
@@ -49,7 +48,6 @@ object ErrorResource {
     ExceptionValue,
     IdentityProviderConfig,
     InterfaceId,
-    LedgerId,
     PackageName,
     Parties,
     Party,
diff --git a/dependencies.json b/dependencies.json
index 8624d8a40..0c438a7e0 100644
--- a/dependencies.json
+++ b/dependencies.json
@@ -137,10 +137,11 @@
         "opentelemetry-sdk",
         "opentelemetry-sdk-testing",
         "opentelemetry-exporter-zipkin",
-        "opentelemetry-exporter-jaeger",
-        "opentelemetry-exporter-otlp-trace"
+        "opentelemetry-exporter-otlp",
+        "opentelemetry-exporter-common",
+        "opentelemetry-sdk-extension-autoconfigure"
       ],
-      "version": "1.12.0",
+      "version": "1.36.0",
       "description": "Opentelemetry libraries",
       "users": [
         "Canton",
@@ -150,10 +151,9 @@
     {
      "org": "io.opentelemetry",
       "artifacts": [
-        "opentelemetry-sdk-extension-autoconfigure",
         "opentelemetry-exporter-prometheus"
       ],
-      "version": "1.12.0-alpha",
+      "version": "1.36.0-alpha",
       "description": "Opentelemetry libraries, alpha version",
       "users": [
         "Canton",
@@ -164,10 +164,10 @@
       "org": "io.opentelemetry.instrumentation",
       "artifacts": [
         "opentelemetry-grpc-1.6",
-        "opentelemetry-runtime-metrics"
+        "opentelemetry-runtime-telemetry-java8"
       ],
-      "version": "1.12.0-alpha",
-      "description": "Opentelemetry instrumentation libraries, alpha version",
+      "version": "2.1.0-alpha",
+      "description": "Opentelemetry instrumentation libraries, alpha (unstable apis) version",
       "users": [
         "Canton",
         "DamlSDK"
diff --git a/project/BuildCommon.scala b/project/BuildCommon.scala
index 43742927b..bff97e193 100644
--- a/project/BuildCommon.scala
+++ b/project/BuildCommon.scala
@@ -399,6 +399,8 @@ object BuildCommon {
       case path if path.contains("module-info.class") => MergeStrategy.discard
       case PathList("org", "jline", _ @_*) => MergeStrategy.first
       case "META-INF/FastDoubleParser-LICENSE" => MergeStrategy.first
+      // complains about okio.kotlin_module clash
+      case PathList("META-INF", "okio.kotlin_module") => MergeStrategy.last
       case x => oldStrategy(x)
     }
 
@@ -419,6 +421,7 @@ object BuildCommon {
      Compile / compile / wartremoverErrors += Wart.custom("com.digitalasset.canton.NonUnitForEach"),
      wartremoverErrors += Wart.custom("com.digitalasset.canton.RequireBlocking"),
      wartremoverErrors += Wart.custom("com.digitalasset.canton.SlickString"),
+     wartremoverErrors += Wart.custom("com.digitalasset.canton.SynchronizedFuture"),
      wartremoverErrors += Wart.custom("com.digitalasset.canton.TryFailed"),
      wartremover.WartRemover.dependsOnLocalProjectWarts(CommunityProjects.`wartremover-extension`),
    ).flatMap(_.settings)
 
@@ -533,6 +536,7 @@ object BuildCommon {
        scala_collection_contrib,
        pureconfig_core,
        pureconfig_generic,
+       shapeless,
        scalatest % Test,
        mockito_scala % Test,
        scalatestMockito % Test,
@@ -581,12 +585,8 @@ object BuildCommon {
        log4j_api,
        opentelemetry_api,
        opentelemetry_sdk,
-       opentelemetry_sdk_autoconfigure,
-       opentelemetry_instrumentation_grpc,
        opentelemetry_exporter_zipkin,
-       opentelemetry_exporter_jaeger,
        opentelemetry_exporter_otlp,
-       opentelemetry_exporter_prometheus,
      ),
      dependencyOverrides ++= Seq(log4j_core, log4j_api),
      coverageEnabled := false,
@@ -620,7 +620,6 @@ object BuildCommon {
        pekko_http_testkit % Test,
        cats,
        better_files,
-       opentelemetry_instrumentation_runtime_metrics,
        monocle_macro,
        scala_logging,
      ),
@@ -666,6 +665,12 @@ object BuildCommon {
        jul_to_slf4j,
        pureconfig_cats,
        pureconfig_core,
+       opentelemetry_instrumentation_grpc,
+       opentelemetry_instrumentation_runtime_metrics,
+       opentelemetry_exporter_otlp,
+       opentelemetry_exporter_prometheus,
+       opentelemetry_exporter_common,
+       opentelemetry_sdk_autoconfigure,
      ),
    )
 
@@ -798,7 +803,6 @@ object BuildCommon {
        opentelemetry_sdk_autoconfigure,
        opentelemetry_instrumentation_grpc,
        opentelemetry_exporter_zipkin,
-       opentelemetry_exporter_jaeger,
        opentelemetry_exporter_otlp,
      ),
      dependencyOverrides ++= Seq(log4j_core, log4j_api),
@@ -1245,6 +1249,7 @@ object BuildCommon {
        commons_codec,
        commons_io,
        daml_metrics,
+       daml_libs_scala_ledger_resources,
        daml_lf_archive_reader,
        daml_lf_transaction,
        daml_lf_engine,
@@ -1257,6 +1262,7 @@ object BuildCommon {
        grpc_api,
        reflections,
        grpc_netty,
+       scopt,
        netty_boring_ssl, // This should be a Runtime dep, but needs to be declared at Compile scope due to https://github.com/sbt/sbt/issues/5568
        netty_handler,
        scalapb_runtime,
diff --git a/project/Dependencies.scala b/project/Dependencies.scala
index 49e9cd8a4..a4302fe0d 100644
--- a/project/Dependencies.scala
+++ b/project/Dependencies.scala
@@ -62,7 +62,7 @@ object Dependencies {
   lazy val oracle_version = "19.18.0.0"
   lazy val postgres_version = "42.6.0"
   lazy val pprint_version = "0.8.1"
-  lazy val prometheus_version = "0.14.1"
+  lazy val prometheus_version = "0.16.0"
   lazy val pureconfig_version = "0.14.0"
   lazy val reflections_version = "0.10.2"
   lazy val scaffeine_version = "5.2.1"
@@ -82,7 +82,7 @@ object Dependencies {
   lazy val snakeyaml_version = "2.0"
   lazy val spray_json_derived_codecs_version = "2.3.10"
   lazy val sttp_version = "3.8.16"
-  lazy val testcontainers_version = "1.15.1"
+  lazy val testcontainers_version = "1.19.7"
   lazy val tink_version = "1.3.0"
   lazy val toxiproxy_java_version = "2.1.7"
 
@@ -239,10 +239,10 @@ object Dependencies {
     resolveDependency("io.opentelemetry", "opentelemetry-sdk-extension-autoconfigure")
   lazy val opentelemetry_exporter_zipkin =
     resolveDependency("io.opentelemetry", "opentelemetry-exporter-zipkin")
-  lazy val opentelemetry_exporter_jaeger =
-    resolveDependency("io.opentelemetry", "opentelemetry-exporter-jaeger")
   lazy val opentelemetry_exporter_otlp =
-    resolveDependency("io.opentelemetry", "opentelemetry-exporter-otlp-trace")
+    resolveDependency("io.opentelemetry", "opentelemetry-exporter-otlp")
+  lazy val opentelemetry_exporter_common =
+    resolveDependency("io.opentelemetry", "opentelemetry-exporter-common")
   lazy val opentelemetry_exporter_prometheus =
     resolveDependency("io.opentelemetry", "opentelemetry-exporter-prometheus")
   lazy val opentelemetry_proto =
@@ -251,7 +251,7 @@ object Dependencies {
   lazy val opentelemetry_instrumentation_grpc =
     resolveDependency("io.opentelemetry.instrumentation", "opentelemetry-grpc-1.6")
   lazy val opentelemetry_instrumentation_runtime_metrics =
-    resolveDependency("io.opentelemetry.instrumentation", "opentelemetry-runtime-metrics")
+    resolveDependency("io.opentelemetry.instrumentation", "opentelemetry-runtime-telemetry-java8")
 
   lazy val better_files = "com.github.pathikrit" %% "better-files" % better_files_version
 
@@ -350,6 +350,8 @@ object Dependencies {
   lazy val daml_libs_struct_spray_json = "com.daml" %% "struct-spray-json" % daml_libraries_version
   lazy val daml_libs_scala_scalatest_utils = "com.daml" %% "scalatest-utils" % daml_libraries_version
+  lazy val daml_libs_scala_ledger_resources =
+    "com.daml" %% "ledger-resources" % daml_libraries_version
   lazy val daml_rs_grpc_pekko = "com.daml" %% "rs-grpc-pekko" % daml_libraries_version
   lazy val daml_lf_encoder = "com.daml" %% "daml-lf-encoder" % daml_libraries_version
   lazy val daml_lf_api_type_signature =
diff --git a/project/project/DamlVersions.scala b/project/project/DamlVersions.scala
index 299d136c8..6fee25101 100644
--- a/project/project/DamlVersions.scala
+++ b/project/project/DamlVersions.scala
@@ -7,7 +7,7 @@ object DamlVersions {
 
   /** The version of the daml compiler (and in most cases of the daml libraries as well).
    */
-  val version: String = "3.0.0-snapshot.20240312.12878.0.v540a7460"
+  val version: String = "3.0.0-snapshot.20240318.12913.0.v1c415c97"
 
  /** Custom Daml artifacts override version.
    */
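The OpenTelemetryOnDemandMetricsReader hunk above replaces the removed MetricReaderFactory/MetricProducer pair with the MetricReader and CollectionRegistration API of OpenTelemetry Java 1.36. A minimal sketch of how the reworked reader is wired into an SdkMeterProvider follows; it assumes the OnDemandMetricsReader trait still exposes a read() method that drains the registered CollectionRegistration, and the meter and instrument names are illustrative only, not part of this patch.

    import com.digitalasset.canton.metrics.OpenTelemetryOnDemandMetricsReader
    import io.opentelemetry.sdk.metrics.SdkMeterProvider

    object OnDemandReaderSketch {
      def main(args: Array[String]): Unit = {
        val reader = new OpenTelemetryOnDemandMetricsReader()
        // registerMetricReader makes the SDK call reader.register(registration),
        // the 1.36 replacement for the removed MetricReaderFactory.apply(producer).
        val meterProvider = SdkMeterProvider.builder().registerMetricReader(reader).build()

        // Record something so there is data to collect (names are illustrative).
        val counter = meterProvider.get("canton.sketch").counterBuilder("sketch.count").build()
        counter.add(1L)

        // Assumed entry point: pulls the current MetricData from the registered
        // CollectionRegistration on demand instead of exporting on a schedule.
        val snapshot = reader.read()
        println(s"collected ${snapshot.size} metric(s)")

        meterProvider.close()
      }
    }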
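Similarly, a sketch of how the new addViewsToProvider hook in OpenTelemetryFactory can be applied while the meter provider is built. Passing an empty list of HistogramDefinition overrides is an assumption made here for brevity; in that case only the code-defined timing and byte-size bucket views are registered.

    import com.daml.metrics.HistogramDefinition
    import com.digitalasset.canton.telemetry.OpenTelemetryFactory
    import io.opentelemetry.sdk.metrics.SdkMeterProvider

    object HistogramViewSketch {
      def main(args: Array[String]): Unit = {
        // Config-defined HistogramDefinition values would be passed here; they are
        // registered first so they take precedence over the code-defined views
        // (subject to the TODO(#17917) caveat noted in the patch).
        val builder = OpenTelemetryFactory.addViewsToProvider(
          SdkMeterProvider.builder(),
          histograms = Seq.empty[HistogramDefinition],
        )
        val meterProvider = builder.build()
        meterProvider.close()
      }
    }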