From 082240809f82f58453cb6b9569ebd746cf161fcb Mon Sep 17 00:00:00 2001 From: Iurii Kravchenko Date: Thu, 2 Oct 2025 14:44:33 +0200 Subject: [PATCH 1/5] Incorrect memory usage fix (#25725) --- ydb/core/formats/arrow/program/abstract.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ydb/core/formats/arrow/program/abstract.h b/ydb/core/formats/arrow/program/abstract.h index 4d93f4f39b87..6a2bdd864f86 100644 --- a/ydb/core/formats/arrow/program/abstract.h +++ b/ydb/core/formats/arrow/program/abstract.h @@ -48,12 +48,14 @@ class TFetchingCalculationPolicy: public IMemoryCalculationPolicy { return EStage::Fetching; } virtual ui64 GetReserveMemorySize( - const ui64 blobsSize, const ui64 rawSize, const std::optional limit, const ui32 recordsCount) const override { - if (limit) { - return std::max(blobsSize, rawSize * (1.0 * *limit) / recordsCount); - } else { - return std::max(blobsSize, rawSize); - } + const ui64 blobsSize, const ui64 rawSize, const std::optional /*limit*/, const ui32 /*recordsCount*/) const override { + return std::max(blobsSize, rawSize); + // FIXME after further memory usage investigation + // if (limit) { + // return std::max(blobsSize, rawSize * (1.0 * *limit) / recordsCount); + // } else { + // return std::max(blobsSize, rawSize); + // } } }; From e78296b7383d4c4e1cdc5a4f7fc74161f84593be Mon Sep 17 00:00:00 2001 From: neyrox Date: Tue, 7 Oct 2025 13:24:15 +0300 Subject: [PATCH 2/5] Mark sorting when we made it up as fake (#26340) --- .../columnshard/engines/reader/abstract/read_metadata.h | 1 + .../engines/reader/simple_reader/iterator/scanner.cpp | 9 +++++---- .../columnshard/engines/reader/transaction/tx_scan.cpp | 3 +++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h b/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h index 099aba654c19..25ea81fe7f91 100644 --- 
a/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h +++ b/ydb/core/tx/columnshard/engines/reader/abstract/read_metadata.h @@ -24,6 +24,7 @@ class TReadMetadataBase { private: YDB_ACCESSOR_DEF(TString, ScanIdentifier); + YDB_ACCESSOR_DEF(bool, FakeSort); std::optional FilteredCountLimit; std::optional RequestedLimit; const ESorting Sorting = ESorting::ASC; // Sorting inside returned batches diff --git a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp index 9dcd6d2292bd..c6a0d4764913 100644 --- a/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp +++ b/ydb/core/tx/columnshard/engines/reader/simple_reader/iterator/scanner.cpp @@ -21,14 +21,15 @@ TConclusionStatus TScanHead::Start() { TScanHead::TScanHead(std::unique_ptr&& sourcesConstructor, const std::shared_ptr& context) : Context(context) { + auto readMetadataContext = context->GetReadMetadata(); if (auto script = Context->GetSourcesAggregationScript()) { SourcesCollection = - std::make_shared(Context, std::move(sourcesConstructor), Context->GetReadMetadata()->GetLimitRobustOptional()); + std::make_shared(Context, std::move(sourcesConstructor), readMetadataContext->GetLimitRobustOptional()); SyncPoints.emplace_back(std::make_shared(SyncPoints.size(), context, SourcesCollection)); SyncPoints.emplace_back(std::make_shared( SourcesCollection, Context->GetSourcesAggregationScript(), Context->GetRestoreResultScript(), SyncPoints.size(), context)); - } else if (Context->GetReadMetadata()->IsSorted()) { - if (Context->GetReadMetadata()->HasLimit()) { + } else if (readMetadataContext->IsSorted()) { + if (readMetadataContext->HasLimit() && !readMetadataContext->GetFakeSort()) { auto collection = std::make_shared(Context, std::move(sourcesConstructor)); SourcesCollection = collection; SyncPoints.emplace_back(std::make_shared( @@ -39,7 +40,7 @@ TScanHead::TScanHead(std::unique_ptr&& 
sourcesCons SyncPoints.emplace_back(std::make_shared(SyncPoints.size(), context, SourcesCollection)); } else { SourcesCollection = - std::make_shared(Context, std::move(sourcesConstructor), Context->GetReadMetadata()->GetLimitRobustOptional()); + std::make_shared(Context, std::move(sourcesConstructor), readMetadataContext->GetLimitRobustOptional()); SyncPoints.emplace_back(std::make_shared(SyncPoints.size(), context, SourcesCollection)); } for (ui32 i = 0; i + 1 < SyncPoints.size(); ++i) { diff --git a/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp b/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp index 2ef765ba5d38..60e2842b2798 100644 --- a/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp +++ b/ydb/core/tx/columnshard/engines/reader/transaction/tx_scan.cpp @@ -147,6 +147,9 @@ void TTxScan::Complete(const TActorContext& ctx) { } auto newRange = scannerConstructor->BuildReadMetadata(Self, read); if (newRange.IsSuccess()) { + if (!request.HasReverse() && deduplicationEnabled) { + (*newRange)->SetFakeSort(true); + } readMetadataRange = TValidator::CheckNotNull(newRange.DetachResult()); } else { return SendError("cannot build metadata", newRange.GetErrorMessage(), ctx); From d00b102fb00d4b8fa0b2080cdd122443b14dd8bc Mon Sep 17 00:00:00 2001 From: Nikita Vasilev Date: Tue, 7 Oct 2025 18:10:12 +0300 Subject: [PATCH 3/5] Fix upsert to table with unique index (#26489) --- .../effects/kqp_opt_phy_upsert_index.cpp | 8 +- ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp | 114 ++++++++++++++++++ 2 files changed, 117 insertions(+), 5 deletions(-) diff --git a/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp b/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp index 3e8d722bc82b..e2811873ad95 100644 --- a/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp +++ b/ydb/core/kqp/opt/physical/effects/kqp_opt_phy_upsert_index.cpp @@ -339,11 +339,7 @@ RewriteInputForConstraint(const TExprBase& inputRows, const 
THashSet hasUniqIndex |= (indexDesc->Type == TIndexDescription::EType::GlobalSyncUnique); for (const auto& indexKeyCol : indexDesc->KeyColumns) { if (inputColumns.contains(indexKeyCol)) { - if (!usedIndexes.contains(indexDesc->Name) && - std::find(mainPk.begin(), mainPk.end(), indexKeyCol) == mainPk.end()) - { - usedIndexes.insert(indexDesc->Name); - } + usedIndexes.insert(indexDesc->Name); } else { // input always contains key columns YQL_ENSURE(std::find(mainPk.begin(), mainPk.end(), indexKeyCol) == mainPk.end()); @@ -352,6 +348,8 @@ RewriteInputForConstraint(const TExprBase& inputRows, const THashSet } } + AFL_ENSURE(!hasUniqIndex || !usedIndexes.empty()); + if (!hasUniqIndex) { missedKeyInput.clear(); } diff --git a/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp b/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp index 9744a4355292..ea862e02872c 100644 --- a/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp +++ b/ydb/core/kqp/ut/indexes/kqp_indexes_ut.cpp @@ -5972,6 +5972,120 @@ R"([[#;#;["Primary1"];[41u]];[["Secondary2"];[2u];["Primary2"];[42u]];[["Seconda ); } } + + Y_UNIT_TEST_TWIN(IndexUpsert, Uniq) { + auto setting = NKikimrKqp::TKqpSetting(); + auto serverSettings = TKikimrSettings().SetKqpSettings({setting}); + TKikimrRunner kikimr(serverSettings); + + auto client = kikimr.GetQueryClient(); + + { + const TString query(Q_(std::format(R"( + CREATE TABLE `/Root/TestTable` ( + a Int32, + b Int32, + PRIMARY KEY(a,b), + INDEX ix_b GLOBAL {} SYNC ON (b) + ); + )", Uniq ? 
"UNIQUE" : ""))); + + auto result = client.ExecuteQuery( + query, + NQuery::TTxControl::NoTx()) + .ExtractValueSync(); + UNIT_ASSERT(result.IsSuccess()); + } + + { + const TString query(Q_(R"( + $v=[<|a:10,b:20|>,<|a:30,b:20|>]; + UPSERT INTO `/Root/TestTable` SELECT * FROM AS_TABLE($v); + )")); + + auto result = client.ExecuteQuery( + query, + NQuery::TTxControl::NoTx()) + .ExtractValueSync(); + if (Uniq) { + UNIT_ASSERT_C(!result.IsSuccess(), result.GetIssues().ToString()); + UNIT_ASSERT_STRING_CONTAINS_C( + result.GetIssues().ToString(), + "Duplicated keys found.", + result.GetIssues().ToString()); + } else { + UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString()); + } + } + + { + const TString query(Q_(R"( + $v=[<|a:10,b:20|>,<|a:10,b:20|>]; + UPSERT INTO `/Root/TestTable` SELECT * FROM AS_TABLE($v); + )")); + + auto result = client.ExecuteQuery( + query, + NQuery::TTxControl::NoTx()) + .ExtractValueSync(); + if (Uniq) { + UNIT_ASSERT_C(!result.IsSuccess(), result.GetIssues().ToString()); + UNIT_ASSERT_STRING_CONTAINS_C( + result.GetIssues().ToString(), + "Duplicated keys found.", + result.GetIssues().ToString()); + } else { + UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString()); + } + } + + { + const TString query(Q_(R"( + $v=[<|a:10,b:10|>,<|a:30,b:30|>]; + UPSERT INTO `/Root/TestTable` SELECT * FROM AS_TABLE($v); + )")); + + auto result = client.ExecuteQuery( + query, + NQuery::TTxControl::NoTx()) + .ExtractValueSync(); + UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString()); + } + + { + const TString query(Q_(R"( + $v=[<|a:20,b:10|>,<|a:20,b:30|>]; + UPSERT INTO `/Root/TestTable` SELECT * FROM AS_TABLE($v); + )")); + + auto result = client.ExecuteQuery( + query, + NQuery::TTxControl::NoTx()) + .ExtractValueSync(); + if (Uniq) { + UNIT_ASSERT_C(!result.IsSuccess(), result.GetIssues().ToString()); + UNIT_ASSERT_STRING_CONTAINS_C( + result.GetIssues().ToString(), + "Conflict with existing key.", + 
result.GetIssues().ToString()); + } else { + UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString()); + } + } + + { + const TString query(Q_(R"( + $v=[<|a:20,b:40|>,<|a:20,b:50|>]; + UPSERT INTO `/Root/TestTable` SELECT * FROM AS_TABLE($v); + )")); + + auto result = client.ExecuteQuery( + query, + NQuery::TTxControl::NoTx()) + .ExtractValueSync(); + UNIT_ASSERT_C(result.IsSuccess(), result.GetIssues().ToString()); + } + } } } From b36c57d0c12932cbd36c8e24555c81d4a3544086 Mon Sep 17 00:00:00 2001 From: Alek5andr-Kotov Date: Tue, 7 Oct 2025 15:20:45 +0300 Subject: [PATCH 4/5] Empty blobs for compaction (#26451) --- .../partition/partition_compaction.cpp | 21 ++----- .../src/client/topic/ut/topic_to_table_ut.cpp | 58 +++++++++++++++++++ 2 files changed, 62 insertions(+), 17 deletions(-) diff --git a/ydb/core/persqueue/pqtablet/partition/partition_compaction.cpp b/ydb/core/persqueue/pqtablet/partition/partition_compaction.cpp index 5918855be89d..58136a8b4507 100644 --- a/ydb/core/persqueue/pqtablet/partition/partition_compaction.cpp +++ b/ydb/core/persqueue/pqtablet/partition/partition_compaction.cpp @@ -178,41 +178,28 @@ void TPartition::TryRunCompaction() const ui64 blobsKeyCountLimit = GetBodyKeysCountLimit(); const ui64 compactedBlobSizeLowerBound = GetCompactedBlobSizeLowerBound(); - if (BlobEncoder.DataKeysBody.size() >= blobsKeyCountLimit) { - CompactionInProgress = true; - Send(SelfId(), new TEvPQ::TEvRunCompaction(BlobEncoder.DataKeysBody.size())); + if ((BlobEncoder.DataKeysBody.size() < blobsKeyCountLimit) && (BlobEncoder.GetSize() < GetCumulativeSizeLimit())) { + LOG_D("No data for blobs compaction"); return; } - size_t blobsCount = 0, blobsSize = 0, totalSize = 0; + size_t blobsCount = 0, blobsSize = 0; for (; blobsCount < BlobEncoder.DataKeysBody.size(); ++blobsCount) { const auto& k = BlobEncoder.DataKeysBody[blobsCount]; if (k.Size < compactedBlobSizeLowerBound) { // неполный блоб. 
можно дописать blobsSize += k.Size; - totalSize += k.Size; if (blobsSize > 2 * MaxBlobSize) { // KV не может отдать много blobsSize -= k.Size; - totalSize -= k.Size; break; } LOG_D("Blob key for append " << k.Key.ToString()); } else { - totalSize += k.Size; LOG_D("Blob key for rename " << k.Key.ToString()); } } - LOG_D(blobsCount << " keys were taken away. Let's read " << blobsSize << " bytes (" << totalSize << ")"); - - if (totalSize < GetCumulativeSizeLimit()) { - LOG_D("Need more data for compaction. " << - "Blobs " << BlobEncoder.DataKeysBody.size() << - ", size " << totalSize << " (" << GetCumulativeSizeLimit() << ")"); - return; - } - - LOG_D("Run compaction for " << blobsCount << " blobs"); + LOG_D(blobsCount << " keys were taken away. Let's read " << blobsSize << " bytes"); CompactionInProgress = true; diff --git a/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp b/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp index b797c4ac234d..457d440cbf83 100644 --- a/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp +++ b/ydb/public/sdk/cpp/src/client/topic/ut/topic_to_table_ut.cpp @@ -3358,6 +3358,64 @@ Y_UNIT_TEST_F(Write_And_Read_Gigant_Messages_2, TFixtureNoClient) TestWriteAndReadMessages(4, 61'000'000, true); } +Y_UNIT_TEST_F(Write_50k_100times_50tx, TFixtureTable) +{ + // 100 transactions. Write 100 50KB messages in each folder. Call the commit at the same time. + // As a result, there will be a lot of small blobs in the FastWrite zone of the main batch, + // which will be picked up by a compact. The scenario is similar to the work of Ya.Metrika. 
+ + const std::size_t PARTITIONS_COUNT = 2; + const std::size_t TXS_COUNT = 50; + + auto makeSourceId = [](unsigned txId, unsigned partitionId) { + std::string sourceId = TEST_MESSAGE_GROUP_ID; + sourceId += "_"; + sourceId += ToString(txId); + sourceId += "_"; + sourceId += ToString(partitionId); + return sourceId; + }; + + CreateTopic("topic_A", TEST_CONSUMER, PARTITIONS_COUNT); + + SetPartitionWriteSpeed("topic_A", 50'000'000); + + std::vector> sessions; + std::vector> transactions; + + for (std::size_t i = 0; i < TXS_COUNT; ++i) { + sessions.push_back(CreateSession()); + auto& session = sessions.back(); + + transactions.push_back(session->BeginTx()); + auto& tx = transactions.back(); + + auto sourceId = makeSourceId(i, 0); + for (size_t j = 0; j < 100; ++j) { + WriteToTopic("topic_A", sourceId, std::string(50'000, 'x'), tx.get(), 0); + } + WaitForAcks("topic_A", sourceId); + + sourceId = makeSourceId(i, 1); + WriteToTopic("topic_A", sourceId, std::string(50'000, 'x'), tx.get(), 1); + WaitForAcks("topic_A", sourceId); + } + + // We are doing an asynchronous commit of transactions. They will be executed simultaneously. + std::vector futures; + + for (std::size_t i = 0; i < TXS_COUNT; ++i) { + futures.push_back(sessions[i]->AsyncCommitTx(*transactions[i])); + } + + // All transactions must be completed successfully. 
+ for (std::size_t i = 0; i < TXS_COUNT; ++i) { + futures[i].Wait(); + const auto& result = futures[i].GetValueSync(); + UNIT_ASSERT_EQUAL_C(result.GetStatus(), EStatus::SUCCESS, result.GetIssues().ToString()); + } +} + } } From 21ae24fbd5f628a5600913a57f6a0244f9f55dd7 Mon Sep 17 00:00:00 2001 From: Aleksei Borzenkov Date: Wed, 8 Oct 2025 12:22:43 +0300 Subject: [PATCH 5/5] Make it possible to backup out-of-range decimal values (#26510) --- ydb/core/tx/datashard/type_serialization.cpp | 13 +- .../tx/schemeshard/ut_export/ut_export.cpp | 120 ++++++++++++++++++ 2 files changed, 130 insertions(+), 3 deletions(-) diff --git a/ydb/core/tx/datashard/type_serialization.cpp b/ydb/core/tx/datashard/type_serialization.cpp index d48f630b39d3..38ff1bb630d0 100644 --- a/ydb/core/tx/datashard/type_serialization.cpp +++ b/ydb/core/tx/datashard/type_serialization.cpp @@ -15,7 +15,10 @@ TString DecimalToString(const std::pair& loHi, const NScheme::TTypeIn using namespace NYql::NDecimal; TInt128 val = FromHalfs(loHi.first, loHi.second); - return ToString(val, typeInfo.GetDecimalType().GetPrecision(), typeInfo.GetDecimalType().GetScale()); + const char* result = ToString(val, MaxPrecision /*typeInfo.GetDecimalType().GetPrecision()*/, typeInfo.GetDecimalType().GetScale()); + Y_ENSURE(result); + + return result; } TString DyNumberToString(TStringBuf data) { @@ -36,11 +39,15 @@ TString PgToString(TStringBuf data, const NScheme::TTypeInfo& typeInfo) { } bool DecimalToStream(const std::pair& loHi, IOutputStream& out, TString& err, const NScheme::TTypeInfo& typeInfo) { - Y_UNUSED(err); using namespace NYql::NDecimal; TInt128 val = FromHalfs(loHi.first, loHi.second); - out << ToString(val, typeInfo.GetDecimalType().GetPrecision(), typeInfo.GetDecimalType().GetScale()); + const char* result = ToString(val, MaxPrecision /*typeInfo.GetDecimalType().GetPrecision()*/, typeInfo.GetDecimalType().GetScale()); + if (!result) [[unlikely]] { + err = "Invalid Decimal binary representation"; + return 
false; + } + out << result; return true; } diff --git a/ydb/core/tx/schemeshard/ut_export/ut_export.cpp b/ydb/core/tx/schemeshard/ut_export/ut_export.cpp index 6f6a330da143..500273fb9c1e 100644 --- a/ydb/core/tx/schemeshard/ut_export/ut_export.cpp +++ b/ydb/core/tx/schemeshard/ut_export/ut_export.cpp @@ -2969,4 +2969,124 @@ attributes { NLs::IndexState(NKikimrSchemeOp::EIndexStateReady), NLs::IndexKeys({"value"})}); } + + Y_UNIT_TEST(DecimalOutOfRange) { + EnvOptions().DisableStatsBatching(true); + Env(); // Init test env + ui64 txId = 100; + + TestCreateTable(Runtime(), ++txId, "/MyRoot", R"( + Name: "Table1" + Columns { Name: "key" Type: "Uint64" } + Columns { Name: "value" Type: "Decimal" } + KeyColumnNames: ["key"] + )"); + Env().TestWaitNotification(Runtime(), txId); + + // Write a normal decimal value + // 10.0^13-1 (scale 9) = 0x21e19e0c9ba76a53600 + { + ui64 key = 1u; + std::pair value = { 0x19e0c9ba76a53600ULL, 0x21eULL }; + UploadRow(Runtime(), "/MyRoot/Table1", 0, {1}, {2}, {TCell::Make(key)}, {TCell::Make(value)}); + } + // Write a decimal value that is out of range for precision 22 + // 10.0^13 (scale 9) = 10^22 = 0x21e19e0c9bab2400000 + { + ui64 key = 2u; + std::pair value = { 0x19e0c9bab2400000ULL, 0x21eULL }; + UploadRow(Runtime(), "/MyRoot/Table1", 0, {1}, {2}, {TCell::Make(key)}, {TCell::Make(value)}); + } + + TestExport(Runtime(), ++txId, "/MyRoot", Sprintf(R"( + ExportToS3Settings { + endpoint: "localhost:%d" + scheme: HTTP + items { + source_path: "/MyRoot/Table1" + destination_prefix: "Backup1" + } + } + )", S3Port())); + Env().TestWaitNotification(Runtime(), txId); + + TestGetExport(Runtime(), txId, "/MyRoot", Ydb::StatusIds::SUCCESS); + + UNIT_ASSERT(HasS3File("/Backup1/metadata.json")); + UNIT_ASSERT(HasS3File("/Backup1/data_00.csv")); + UNIT_ASSERT_STRINGS_EQUAL(GetS3FileContent("/Backup1/data_00.csv"), + "1,9999999999999\n" + "2,10000000000000\n"); + + TestImport(Runtime(), ++txId, "/MyRoot", Sprintf(R"( + ImportFromS3Settings { + 
endpoint: "localhost:%d" + scheme: HTTP + items { + source_prefix: "Backup1" + destination_path: "/MyRoot/Table2" + } + } + )", S3Port())); + Env().TestWaitNotification(Runtime(), txId); + + TestGetImport(Runtime(), txId, "/MyRoot", Ydb::StatusIds::SUCCESS); + + TestExport(Runtime(), ++txId, "/MyRoot", Sprintf(R"( + ExportToS3Settings { + endpoint: "localhost:%d" + scheme: HTTP + items { + source_path: "/MyRoot/Table2" + destination_prefix: "Backup2" + } + } + )", S3Port())); + Env().TestWaitNotification(Runtime(), txId); + + TestGetExport(Runtime(), txId, "/MyRoot", Ydb::StatusIds::SUCCESS); + + // Note: out-of-range values are restored as inf + UNIT_ASSERT(HasS3File("/Backup2/metadata.json")); + UNIT_ASSERT(HasS3File("/Backup2/data_00.csv")); + UNIT_ASSERT_STRINGS_EQUAL(GetS3FileContent("/Backup2/data_00.csv"), + "1,9999999999999\n" + "2,inf\n"); + } + + Y_UNIT_TEST(CorruptedDecimalValue) { + EnvOptions().DisableStatsBatching(true); + Env(); // Init test env + ui64 txId = 100; + + TestCreateTable(Runtime(), ++txId, "/MyRoot", R"( + Name: "Table1" + Columns { Name: "key" Type: "Uint64" } + Columns { Name: "value" Type: "Decimal" } + KeyColumnNames: ["key"] + )"); + Env().TestWaitNotification(Runtime(), txId); + + // Write a decimal value that is way out of range for max precision 35 + // 10^38 = 0x4b3b4ca85a86c47a098a224000000000 + { + ui64 key = 1u; + std::pair value = { 0x098a224000000000ULL, 0x4b3b4ca85a86c47aULL }; + UploadRow(Runtime(), "/MyRoot/Table1", 0, {1}, {2}, {TCell::Make(key)}, {TCell::Make(value)}); + } + + TestExport(Runtime(), ++txId, "/MyRoot", Sprintf(R"( + ExportToS3Settings { + endpoint: "localhost:%d" + scheme: HTTP + items { + source_path: "/MyRoot/Table1" + destination_prefix: "Backup1" + } + } + )", S3Port())); + Env().TestWaitNotification(Runtime(), txId); + + TestGetExport(Runtime(), txId, "/MyRoot", Ydb::StatusIds::CANCELLED); + } }